// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/* The addr is used to check if the vma size fits */
	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;

	if (!transhuge_vma_suitable(vma, addr))
		return false;
	if (vma_is_anonymous(vma))
		return __transparent_hugepage_enabled(vma);
	if (vma_is_shmem(vma))
		return shmem_huge_enabled(vma);

	return false;
}

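/*
 * Allocate (or take a reference on) the global huge zero page. On first
 * allocation the refcount is set to 2: one reference for the caller and
 * one that is only dropped by the shrinker, so the page can be reclaimed
 * once all users are gone.
 */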
static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

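/*
 * Per-mm wrappers: the first caller in an mm takes a reference on the
 * huge zero page and records that in MMF_HUGE_ZERO_PAGE, so each mm holds
 * at most one reference, dropped again in mm_put_huge_zero_page().
 */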
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

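/*
 * sysfs interface under /sys/kernel/mm/transparent_hugepage/, e.g.
 * "echo madvise > /sys/kernel/mm/transparent_hugepage/enabled".
 */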
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		/*
		 * Hardware doesn't support hugepages, hence disable
		 * DAX PMD support.
		 */
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

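/*
 * Early parsing of the transparent_hugepage= boot parameter, mirroring
 * the "always"/"madvise"/"never" choices of the sysfs "enabled" knob.
 */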
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(compound_head(page));
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	return &pgdat->deferred_split_queue;
}
#endif

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

bool is_transparent_hugepage(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);
	return is_huge_zero_page(page) ||
	       page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(is_transparent_hugepage);

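/*
 * Ask for a huge-page-aligned mapping by over-allocating: request @len
 * padded by @size, then shift the result so the returned address has the
 * same offset within a huge page as the file offset @off. Returns 0 when
 * padding cannot help (or would overflow) so the caller retries without it.
 */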
static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					      off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
		goto out;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;
out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

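/*
 * Slow path of an anonymous THP fault: charge the freshly allocated
 * compound page to the memcg, preallocate a PTE page table (deposited for
 * a later split), clear the huge page, and map it with a single PMD.
 * Falls back or delivers a userfaultfd event where required.
 */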
static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_charge(page, vma->vm_mm, gfp)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	cgroup_throttle_swaprate(page, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		lru_cache_add_inactive_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	put_page(page);
	return ret;
}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}

/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

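/*
 * Anonymous huge page fault entry point. Read faults on a suitable VMA
 * can be satisfied with the shared huge zero page; otherwise a THP is
 * allocated according to the current defrag policy and mapped by
 * __do_huge_pmd_anonymous_page().
 */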
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;
		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	prep_transhuge_page(page);
	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
}

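/*
 * Map a raw pfn (typically DAX/devmap memory) with a huge PMD. If the PMD
 * is already populated, only upgrade access flags for write faults on the
 * same pfn; an optional preallocated page table is deposited for
 * architectures that need it.
 */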
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud_prot - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

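/* Mark the huge PMD young (and dirty for writes) on a follow_page access. */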
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (flags & FOLL_WRITE)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				pmd, _pmd, flags & FOLL_WRITE))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	/*
	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
	 * not be in this function with `flags & FOLL_COW` set.
	 */
	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	if (!try_grab_page(page, flags))
		page = ERR_PTR(-ENOMEM);

	return page;
}

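/*
 * Copy one huge PMD at fork() time. Anonymous THPs are shared
 * copy-on-write by write-protecting both copies; migration entries and
 * the huge zero page get their own handling, and pages that may be
 * pinned for DMA force a split and retry so copy-on-write cannot replace
 * a pinned page.
 */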
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

	/*
	 * Make sure the _PAGE_UFFD_WP bit is cleared if the new VMA
	 * does not have the VM_UFFD_WP, which means that the uffd
	 * fork event is not enabled.
	 */
	if (!(vma->vm_flags & VM_UFFD_WP))
		pmd = pmd_clear_uffd_wp(pmd);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (is_write_migration_entry(entry)) {
			make_migration_entry_read(&entry);
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = mm_get_huge_zero_page(dst_mm);
		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		ret = 0;
		goto out_unlock;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);

	/*
	 * If this page is a potentially pinned page, split and retry the fault
	 * with smaller page size.  Normally this should not happen because the
	 * userspace should use MADV_DONTFORK upon pinned regions.  This is a
	 * best effort that the pinned pages won't be replaced by another
	 * random page during the coming copy-on-write.
	 */
	if (unlikely(page_needs_cow_for_dma(vma, src_page))) {
		pte_free(dst_mm, pgtable);
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		__split_huge_pmd(vma, src_pmd, addr, false, NULL);
		return -EAGAIN;
	}

	get_page(src_page);
	page_dup_rmap(src_page, true);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (flags & FOLL_WRITE)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				pud, _pud, flags & FOLL_WRITE))
		update_mmu_cache_pud(vma, addr, pud);
}

struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 *
	 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	if (!try_grab_page(page, flags))
		page = ERR_PTR(-ENOMEM);

	return page;
}

int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * When page table lock is held, the huge zero pud should not be
	 * under splitting since we don't split the page itself, only pud to
	 * a page table.
	 */
	if (is_huge_zero_pud(pud)) {
		/* No huge zero pud yet */
	}

	/* Please refer to comments in copy_huge_pmd() */
	if (unlikely(page_needs_cow_for_dma(vma, pud_page(pud)))) {
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		__split_huge_pud(vma, src_pud, addr);
		return -EAGAIN;
	}

	pudp_set_wrprotect(src_mm, addr, src_pud);
	pud = pud_mkold(pud_wrprotect(pud));
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}

void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	pud_t entry;
	unsigned long haddr;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	entry = pud_mkyoung(orig_pud);
	if (write)
		entry = pud_mkdirty(entry);
	haddr = vmf->address & HPAGE_PUD_MASK;
	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);

unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{
	pmd_t entry;
	unsigned long haddr;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	if (write)
		entry = pmd_mkdirty(entry);
	haddr = vmf->address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);

unlock:
	spin_unlock(vmf->ptl);
}

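/*
 * Write fault on a present but write-protected huge PMD. The mapping is
 * reused in place when reuse_swap_page() shows we are the only user;
 * otherwise (including the huge zero page) the PMD is split and the fault
 * retried at PTE granularity.
 */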
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);

	if (is_huge_zero_pmd(orig_pmd))
		goto fallback;

	spin_lock(vmf->ptl);

	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}

	page = pmd_page(orig_pmd);
	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);

	/* Lock page for reuse_swap_page() */
	if (!trylock_page(page)) {
		get_page(page);
		spin_unlock(vmf->ptl);
		lock_page(page);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			spin_unlock(vmf->ptl);
			unlock_page(page);
			put_page(page);
			return 0;
		}
		put_page(page);
	}

	/*
	 * We can only reuse the page if nobody else maps the huge page or it's
	 * part.
	 */
	if (reuse_swap_page(page, NULL)) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		unlock_page(page);
		spin_unlock(vmf->ptl);
		return VM_FAULT_WRITE;
	}

	unlock_page(page);
	spin_unlock(vmf->ptl);
fallback:
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
	return VM_FAULT_FALLBACK;
}

/*
 * FOLL_FORCE can write to even unwritable pmd's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
	return pmd_write(pmd) ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}

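/*
 * GUP helper for a normal transparent huge PMD: validates write/NUMA
 * constraints, grabs a reference per FOLL_GET/FOLL_PIN, optionally mlocks
 * the page, and returns the tail page corresponding to @addr.
 */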
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page = NULL;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
		goto out;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	/* Full NUMA hinting faults to serialise migration in fault paths */
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);

	if (!try_grab_page(page, flags))
		return ERR_PTR(-ENOMEM);

	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * We don't mlock() pte-mapped THPs. This way we can avoid
		 * leaking mlocked pages into non-VM_LOCKED VMAs.
		 *
		 * For anon THP:
		 *
		 * In most cases the pmd is the only mapping of the page as we
		 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
		 * writable private mappings in populate_vma_page_range().
		 *
		 * The only scenario where we have the page shared here is if
		 * we are mlocking a read-only mapping shared over fork(). We
		 * skip mlocking such pages.
		 *
		 * For file THP:
		 *
		 * We can expect PageDoubleMap() to be stable under page lock:
		 * for file pages we set it in page_add_file_rmap(), which
		 * requires page to be locked.
		 */

		if (PageAnon(page) && compound_mapcount(page) != 1)
			goto skip_mlock;
		if (PageDoubleMap(page) || !page->mapping)
			goto skip_mlock;
		if (!trylock_page(page))
			goto skip_mlock;
		if (page->mapping && !PageDoubleMap(page))
			mlock_vma_page(page);
		unlock_page(page);
	}
skip_mlock:
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);

out:
	return page;
}

/* NUMA hinting page fault entry point for trans huge pmds */
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
{
	struct vm_area_struct *vma = vmf->vma;
	struct anon_vma *anon_vma = NULL;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int page_nid = NUMA_NO_NODE, this_nid = numa_node_id();
	int target_nid, last_cpupid = -1;
	bool page_locked;
	bool migrated = false;
	bool was_writable;
	int flags = 0;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
		goto out_unlock;

	/*
	 * If there are potential migrations, wait for completion and retry
	 * without disrupting NUMA hinting information. Do not relock and
	 * check_same as the page may no longer be mapped.
	 */
	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
		page = pmd_page(*vmf->pmd);
		if (!get_page_unless_zero(page))
			goto out_unlock;
		spin_unlock(vmf->ptl);
		put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
		goto out;
	}

	page = pmd_page(pmd);
	BUG_ON(is_huge_zero_page(page));
	page_nid = page_to_nid(page);
	last_cpupid = page_cpupid_last(page);
	count_vm_numa_event(NUMA_HINT_FAULTS);
	if (page_nid == this_nid) {
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
		flags |= TNF_FAULT_LOCAL;
	}

	/* See similar comment in do_numa_page for explanation */
	if (!pmd_savedwrite(pmd))
		flags |= TNF_NO_GROUP;

	/*
	 * Acquire the page lock to serialise THP migrations but avoid dropping
	 * page_table_lock if at all possible
	 */
	page_locked = trylock_page(page);
	target_nid = mpol_misplaced(page, vma, haddr);
	if (target_nid == NUMA_NO_NODE) {
		/* If the page was locked, there are no parallel migrations */
		if (page_locked)
			goto clear_pmdnuma;
	}

	/* Migration could have started since the pmd_trans_migrating check */
	if (!page_locked) {
		page_nid = NUMA_NO_NODE;
		if (!get_page_unless_zero(page))
			goto out_unlock;
		spin_unlock(vmf->ptl);
		put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
		goto out;
	}

	/*
	 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
	 * to serialise splits
	 */
	get_page(page);
	spin_unlock(vmf->ptl);
	anon_vma = page_lock_anon_vma_read(page);

	/* Confirm the PMD did not change while page_table_lock was released */
	spin_lock(vmf->ptl);
	if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
		unlock_page(page);
		put_page(page);
		page_nid = NUMA_NO_NODE;
		goto out_unlock;
	}

	/* Bail if we fail to protect against THP splits for any reason */
	if (unlikely(!anon_vma)) {
		put_page(page);
		page_nid = NUMA_NO_NODE;
		goto clear_pmdnuma;
	}

	/*
	 * Since we took the NUMA fault, we must have observed the !accessible
	 * bit. Make sure all other CPUs agree with that, to avoid them
	 * modifying the page we're about to migrate.
	 *
	 * Must be done under PTL such that we'll observe the relevant
	 * inc_tlb_flush_pending().
	 *
	 * We are not sure a pending tlb flush here is for a huge page
	 * mapping or not. Hence use the tlb range variant
	 */
	if (mm_tlb_flush_pending(vma->vm_mm)) {
		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
		/*
		 * change_huge_pmd() released the pmd lock before
		 * invalidating the secondary MMUs sharing the primary
		 * MMU pagetables (with ->invalidate_range()). The
		 * mmu_notifier_invalidate_range_end() (which
		 * internally calls ->invalidate_range()) in
		 * change_pmd_range() will run after us, so we can't
		 * rely on it here and we need an explicit invalidate.
		 */
		mmu_notifier_invalidate_range(vma->vm_mm, haddr,
					      haddr + HPAGE_PMD_SIZE);
	}

	/*
	 * Migrate the THP to the requested node, returns with page unlocked
	 * and access rights restored.
	 */
	spin_unlock(vmf->ptl);

	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
				vmf->pmd, pmd, vmf->address, page, target_nid);
	if (migrated) {
		flags |= TNF_MIGRATED;
		page_nid = target_nid;
	} else
		flags |= TNF_MIGRATE_FAIL;

	goto out;
clear_pmdnuma:
	BUG_ON(!PageLocked(page));
	was_writable = pmd_savedwrite(pmd);
	pmd = pmd_modify(pmd, vma->vm_page_prot);
	pmd = pmd_mkyoung(pmd);
	if (was_writable)
		pmd = pmd_mkwrite(pmd);
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	unlock_page(page);
out_unlock:
	spin_unlock(vmf->ptl);

out:
	if (anon_vma)
		page_unlock_anon_vma_read(anon_vma);

	if (page_nid != NUMA_NO_NODE)
		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
				flags);

	return 0;
}

/*
 * Return true if we do MADV_FREE successfully on entire pmd page.
 * Otherwise, return false.
 */
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long addr, unsigned long next)
{
	spinlock_t *ptl;
	pmd_t orig_pmd;
	struct page *page;
	struct mm_struct *mm = tlb->mm;
	bool ret = false;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		goto out_unlocked;

	orig_pmd = *pmd;
	if (is_huge_zero_pmd(orig_pmd))
		goto out;

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(orig_pmd));
		goto out;
	}

	page = pmd_page(orig_pmd);
	/*
	 * If other processes are mapping this page, we can't discard it
	 * unless they all do MADV_FREE, so let's skip the page.
	 */
	if (page_mapcount(page) != 1)
		goto out;

	if (!trylock_page(page))
		goto out;

	/*
	 * If the user wants to discard part of the THP, split it so
	 * MADV_FREE will deactivate only those pages.
	 */
	if (next - addr != HPAGE_PMD_SIZE) {
		get_page(page);
		spin_unlock(ptl);
		split_huge_page(page);
		unlock_page(page);
		put_page(page);
		goto out_unlocked;
	}

	if (PageDirty(page))
		ClearPageDirty(page);
	unlock_page(page);

	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
		pmdp_invalidate(vma, addr, pmd);
		orig_pmd = pmd_mkold(orig_pmd);
		orig_pmd = pmd_mkclean(orig_pmd);

		set_pmd_at(mm, addr, pmd, orig_pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	}

	mark_page_lazyfree(page);
	ret = true;
out:
	spin_unlock(ptl);
out_unlocked:
	return ret;
}

static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	mm_dec_nr_ptes(mm);
}

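/*
 * Tear down one huge PMD during munmap/exit: clear the entry, return the
 * deposited page table, fix up the rmap and RSS counters, and queue the
 * page for the TLB gather. Returns 1 if a huge PMD was zapped, 0 if the
 * PMD was not huge and the caller must fall back to the PTE path.
 */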
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;
	spinlock_t *ptl;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at deposited pgtable
	 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related
	 * operations.
	 */
	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
						tlb->fullmm);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	if (vma_is_special_huge(vma)) {
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
		if (is_huge_zero_pmd(orig_pmd))
			tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
	} else if (is_huge_zero_pmd(orig_pmd)) {
		zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
	} else {
		struct page *page = NULL;
		int flush_needed = 1;

		if (pmd_present(orig_pmd)) {
			page = pmd_page(orig_pmd);
			page_remove_rmap(page, true);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			VM_BUG_ON_PAGE(!PageHead(page), page);
		} else if (thp_migration_supported()) {
			swp_entry_t entry;

			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
			entry = pmd_to_swp_entry(orig_pmd);
			page = pfn_to_page(swp_offset(entry));
			flush_needed = 0;
		} else
			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");

		if (PageAnon(page)) {
			zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		} else {
			if (arch_needs_pgtable_deposit())
				zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
		}

		spin_unlock(ptl);
		if (flush_needed)
			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}
	return 1;
}

#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	/*
	 * With split pmd lock we also need to move preallocated
	 * PTE page table if new_pmd is on different PMD page table.
	 *
	 * We also don't deposit and withdraw tables for file pages.
	 */
	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}
#endif

static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (unlikely(is_pmd_migration_entry(pmd)))
		pmd = pmd_swp_mksoft_dirty(pmd);
	else if (pmd_present(pmd))
		pmd = pmd_mksoft_dirty(pmd);
#endif
	return pmd;
}

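/*
 * Move a huge pmd from @old_addr to @new_addr for mremap(): clear the old
 * entry under both pmd locks, move the deposited page table along when
 * required, and re-set the entry at the new address. Returns true if a huge
 * pmd was moved, false if the caller must fall back to moving ptes.
 */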
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	pmd_t pmd;
	struct mm_struct *mm = vma->vm_mm;
	bool force_flush = false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		return false;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
	if (old_ptl) {
		new_ptl = pmd_lockptr(mm, new_pmd);
		if (new_ptl != old_ptl)
			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
		if (pmd_present(pmd))
			force_flush = true;
		VM_BUG_ON(!pmd_none(*new_pmd));

		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
			pgtable_t pgtable;
			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
		}
		pmd = move_soft_dirty_pmd(pmd);
		set_pmd_at(mm, new_addr, new_pmd, pmd);
		if (force_flush)
			flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
		if (new_ptl != old_ptl)
			spin_unlock(new_ptl);
		spin_unlock(old_ptl);
		return true;
	}
	return false;
}

/*
 * Returns
 *  - 0 if PMD could not be locked
 *  - 1 if PMD was locked but protections are unchanged and TLB flush is unnecessary
 *  - HPAGE_PMD_NR if protections changed and TLB flush is necessary
 */
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t entry;
	bool preserve_write;
	int ret;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;

	preserve_write = prot_numa && pmd_write(*pmd);
	ret = 1;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (is_swap_pmd(*pmd)) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);

		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
		if (is_write_migration_entry(entry)) {
			pmd_t newpmd;
			/*
			 * A protection check is difficult so
			 * just be safe and disable write
			 */
			make_migration_entry_read(&entry);
			newpmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*pmd))
				newpmd = pmd_swp_mksoft_dirty(newpmd);
			set_pmd_at(mm, addr, pmd, newpmd);
		}
		goto unlock;
	}
#endif

	/*
	 * Avoid trapping faults against the zero page. The read-only
	 * data is likely to be read-cached on the local CPU and
	 * local/remote hits to the zero page are not interesting.
	 */
	if (prot_numa && is_huge_zero_pmd(*pmd))
		goto unlock;

	if (prot_numa && pmd_protnone(*pmd))
		goto unlock;

	/*
	 * In case of prot_numa, we are under mmap_read_lock(mm). It's critical
	 * to not clear the pmd intermittently, to avoid a race with
	 * MADV_DONTNEED, which is also under mmap_read_lock(mm):
	 *
	 *	CPU0:				CPU1:
	 *				change_huge_pmd(prot_numa=1)
	 *				 pmdp_huge_get_and_clear_notify()
	 * madvise_dontneed()
	 *  zap_pmd_range()
	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
	 *   // skip the pmd
	 *				 set_pmd_at();
	 *				 // pmd is re-established
	 *
	 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
	 * which may break userspace.
	 *
	 * pmdp_invalidate() is required to make sure we don't miss
	 * dirty/young flags set by hardware.
	 */
	entry = pmdp_invalidate(vma, addr, pmd);

	entry = pmd_modify(entry, newprot);
	if (preserve_write)
		entry = pmd_mk_savedwrite(entry);
	if (uffd_wp) {
		entry = pmd_wrprotect(entry);
		entry = pmd_mkuffd_wp(entry);
	} else if (uffd_wp_resolve) {
		/*
		 * Leave the write bit to be handled by the page fault
		 * handler, so that things like COW can be properly
		 * handled.
		 */
		entry = pmd_clear_uffd_wp(entry);
	}
	ret = HPAGE_PMD_NR;
	set_pmd_at(mm, addr, pmd, entry);
	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
unlock:
	spin_unlock(ptl);
	return ret;
}

/*
 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
 *
 * Note that if it returns page table lock pointer, this routine returns without
 * unlocking page table lock. So callers must unlock it.
 */
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;
	ptl = pmd_lock(vma->vm_mm, pmd);
	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
			pmd_devmap(*pmd)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}

/*
 * Returns true if a given pud maps a thp, false otherwise.
 *
 * Note that if it returns true, this routine returns without unlocking page
 * table lock. So callers must unlock it.
 */
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pud_lock(vma->vm_mm, pud);
	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pud_t *pud, unsigned long addr)
{
	spinlock_t *ptl;

	ptl = __pud_trans_huge_lock(pud, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at deposited pgtable
	 * when calling pudp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pudp related
	 * operations.
	 */
	pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
	tlb_remove_pud_tlb_entry(tlb, pud, addr);
	if (vma_is_special_huge(vma)) {
		spin_unlock(ptl);
		/* No zero page support yet */
	} else {
		/* No support for anonymous PUD pages yet */
		BUG();
	}
	return 1;
}

static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
		unsigned long haddr)
{
	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));

	count_vm_event(THP_SPLIT_PUD);

	pudp_huge_clear_flush_notify(vma, haddr, pud);
}

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address)
{
	spinlock_t *ptl;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address & HPAGE_PUD_MASK,
				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	ptl = pud_lock(vma->vm_mm, pud);
	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
		goto out;
	__split_huge_pud_locked(vma, pud, range.start);

out:
	spin_unlock(ptl);
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above pudp_huge_clear_flush_notify() did already call it.
	 */
	mmu_notifier_invalidate_range_only_end(&range);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

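/*
 * Replace a huge zero page pmd with a page table full of pte entries that
 * all point at the small zero page, keeping everything write protected.
 */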
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	/*
	 * Leave pmd empty until pte is filled. Note that it is fine to delay
	 * notification until mmu_notifier_invalidate_range_end() as we are
	 * replacing a zero pmd write protected page with a zero pte write
	 * protected page.
	 *
	 * See Documentation/vm/mmu_notifier.rst
	 */
	pmdp_huge_clear_flush(vma, haddr, pmd);

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
}

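/*
 * Split one huge pmd into a page table of normal entries, with the caller
 * holding the pmd lock. For file mappings the pmd is simply zapped; for the
 * huge zero page the work is delegated to __split_huge_zero_page_pmd();
 * otherwise each subpage gets a pte (or a migration entry when @freeze is
 * set or the pmd was already a migration entry) carrying over the dirty,
 * young, soft-dirty and uffd-wp bits.
 */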
static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long haddr, bool freeze)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	pgtable_t pgtable;
	pmd_t old_pmd, _pmd;
	bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
	unsigned long addr;
	int i;

	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
				&& !pmd_devmap(*pmd));

	count_vm_event(THP_SPLIT_PMD);

	if (!vma_is_anonymous(vma)) {
		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
		/*
		 * We are going to unmap this huge page. So
		 * just go ahead and zap it
		 */
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(mm, pmd);
		if (vma_is_special_huge(vma))
			return;
		page = pmd_page(_pmd);
		if (!PageDirty(page) && pmd_dirty(_pmd))
			set_page_dirty(page);
		if (!PageReferenced(page) && pmd_young(_pmd))
			SetPageReferenced(page);
		page_remove_rmap(page, true);
		put_page(page);
		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
		return;
	} else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
		/*
		 * FIXME: Do we want to invalidate secondary mmu by calling
		 * mmu_notifier_invalidate_range() see comments below inside
		 * __split_huge_pmd() ?
		 *
		 * We are going from a zero huge page write protected to zero
		 * small pages also write protected so it does not seem useful
		 * to invalidate secondary mmu at this time.
		 */
		return __split_huge_zero_page_pmd(vma, haddr, pmd);
	}

	/*
	 * Up to this point the pmd is present and huge and userland has the
	 * whole access to the hugepage during the split (which happens in
	 * place). If we overwrite the pmd with the not-huge version pointing
	 * to the pte here (which of course we could if all CPUs were bug
	 * free), userland could trigger a small page size TLB miss on the
	 * small sized TLB while the hugepage TLB entry is still established in
	 * the huge TLB. Some CPU doesn't like that.
	 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
	 * 383 on page 105. Intel should be safe but it also warns that it's
	 * only safe if the permission and cache attributes of the two entries
	 * loaded in the two TLB is identical (which should be the case here).
	 * But it is generally safer to never allow small and huge TLB entries
	 * for the same virtual address to be loaded simultaneously. So instead
	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
	 * current pmd notpresent (atomically because here the pmd_trans_huge
	 * must remain set at all times on the pmd until the split is complete
	 * for this pmd), then we flush the SMP TLB and finally we write the
	 * non-huge version of the pmd entry with pmd_populate.
	 */
	old_pmd = pmdp_invalidate(vma, haddr, pmd);

	pmd_migration = is_pmd_migration_entry(old_pmd);
	if (unlikely(pmd_migration)) {
		swp_entry_t entry;

		entry = pmd_to_swp_entry(old_pmd);
		page = pfn_to_page(swp_offset(entry));
		write = is_write_migration_entry(entry);
		young = false;
		soft_dirty = pmd_swp_soft_dirty(old_pmd);
		uffd_wp = pmd_swp_uffd_wp(old_pmd);
	} else {
		page = pmd_page(old_pmd);
		if (pmd_dirty(old_pmd))
			SetPageDirty(page);
		write = pmd_write(old_pmd);
		young = pmd_young(old_pmd);
		soft_dirty = pmd_soft_dirty(old_pmd);
		uffd_wp = pmd_uffd_wp(old_pmd);
	}
	VM_BUG_ON_PAGE(!page_count(page), page);
	page_ref_add(page, HPAGE_PMD_NR - 1);

	/*
	 * Withdraw the table only after we mark the pmd entry invalid.
	 * This is critical for some architectures (Power).
	 */
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		/*
		 * Note that NUMA hinting access restrictions are not
		 * transferred to avoid any possibility of altering
		 * permissions across VMAs.
		 */
		if (freeze || pmd_migration) {
			swp_entry_t swp_entry;
			swp_entry = make_migration_entry(page + i, write);
			entry = swp_entry_to_pte(swp_entry);
			if (soft_dirty)
				entry = pte_swp_mksoft_dirty(entry);
			if (uffd_wp)
				entry = pte_swp_mkuffd_wp(entry);
		} else {
			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
			entry = maybe_mkwrite(entry, vma);
			if (!write)
				entry = pte_wrprotect(entry);
			if (!young)
				entry = pte_mkold(entry);
			if (soft_dirty)
				entry = pte_mksoft_dirty(entry);
			if (uffd_wp)
				entry = pte_mkuffd_wp(entry);
		}
		pte = pte_offset_map(&_pmd, addr);
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, entry);
		if (!pmd_migration)
			atomic_inc(&page[i]._mapcount);
		pte_unmap(pte);
	}

	if (!pmd_migration) {
		/*
		 * Set PG_double_map before dropping compound_mapcount to avoid
		 * false-negative page_mapped().
		 */
		if (compound_mapcount(page) > 1 &&
		    !TestSetPageDoubleMap(page)) {
			for (i = 0; i < HPAGE_PMD_NR; i++)
				atomic_inc(&page[i]._mapcount);
		}

		lock_page_memcg(page);
		if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
			/* Last compound_mapcount is gone. */
			__mod_lruvec_page_state(page, NR_ANON_THPS,
						-HPAGE_PMD_NR);
			if (TestClearPageDoubleMap(page)) {
				/* No need in mapcount reference anymore */
				for (i = 0; i < HPAGE_PMD_NR; i++)
					atomic_dec(&page[i]._mapcount);
			}
		}
		unlock_page_memcg(page);
	}

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);

	if (freeze) {
		for (i = 0; i < HPAGE_PMD_NR; i++) {
			page_remove_rmap(page + i, false);
			put_page(page + i);
		}
	}
}

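/*
 * Lock the pmd (and, for anonymous pages, the page) and split the huge pmd
 * mapping at @address. @page, when non-NULL, lets the caller make sure the
 * pmd still maps the page it expects; @freeze requests migration entries
 * instead of ptes.
 */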
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page)
{
	spinlock_t *ptl;
	struct mmu_notifier_range range;
	bool do_unlock_page = false;
	pmd_t _pmd;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address & HPAGE_PMD_MASK,
				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	ptl = pmd_lock(vma->vm_mm, pmd);

	/*
	 * If the caller asks to set up migration entries, we need a page to
	 * check the pmd against. Otherwise we can end up replacing the wrong
	 * page.
	 */
	VM_BUG_ON(freeze && !page);
	if (page) {
		VM_WARN_ON_ONCE(!PageLocked(page));
		if (page != pmd_page(*pmd))
			goto out;
	}

repeat:
	if (pmd_trans_huge(*pmd)) {
		if (!page) {
			page = pmd_page(*pmd);
			/*
			 * An anonymous page must be locked, to ensure that a
			 * concurrent reuse_swap_page() sees stable mapcount;
			 * but reuse_swap_page() is not used on shmem or file,
			 * and page lock must not be taken when zap_pmd_range()
			 * calls __split_huge_pmd() while i_mmap_lock is held.
			 */
			if (PageAnon(page)) {
				if (unlikely(!trylock_page(page))) {
					get_page(page);
					_pmd = *pmd;
					spin_unlock(ptl);
					lock_page(page);
					spin_lock(ptl);
					if (unlikely(!pmd_same(*pmd, _pmd))) {
						unlock_page(page);
						put_page(page);
						page = NULL;
						goto repeat;
					}
					put_page(page);
				}
				do_unlock_page = true;
			}
		}
		if (PageMlocked(page))
			clear_page_mlock(page);
	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
		goto out;
	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
out:
	spin_unlock(ptl);
	if (do_unlock_page)
		unlock_page(page);
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback.
	 * There are 3 cases to consider inside __split_huge_pmd_locked():
	 *  1) pmdp_huge_clear_flush_notify() calls invalidate_range(), which
	 *     obviously covers this case.
	 *  2) __split_huge_zero_page_pmd() replaces a read-only zero page;
	 *     any write fault will trigger a flush_notify before pointing
	 *     to a new page (it is fine if the secondary mmu keeps pointing
	 *     to the old zero page in the meantime).
	 *  3) Splitting a huge pmd into ptes pointing to the same page; no
	 *     need to invalidate secondary tlb entries, they are all still
	 *     valid, and any further changes to individual ptes will notify.
	 * So no need to call mmu_notifier->invalidate_range().
	 */
	mmu_notifier_invalidate_range_only_end(&range);
}

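/*
 * Walk the page tables down to the pmd covering @address and split it if it
 * maps a THP. Missing levels are simply skipped.
 */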
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);

	__split_huge_pmd(vma, pmd, address, freeze, page);
}

static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
{
	/*
	 * If the new address isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
	    range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
			 ALIGN(address, HPAGE_PMD_SIZE)))
		split_huge_pmd_address(vma, address, false, NULL);
}

void vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/* Check if we need to split start first. */
	split_huge_pmd_if_needed(vma, start);

	/* Check if we need to split end next. */
	split_huge_pmd_if_needed(vma, end);

	/*
	 * If we're also updating the vma->vm_next->vm_start,
	 * check if we need to split it.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next;
		split_huge_pmd_if_needed(next, nstart);
	}
}

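/*
 * Unmap a huge page in preparation for splitting it, replacing anonymous
 * mappings with migration entries (TTU_SPLIT_FREEZE) so they can be
 * re-established against the subpages afterwards by remap_page().
 */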
static void unmap_page(struct page *page)
{
	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
	bool unmap_success;

	VM_BUG_ON_PAGE(!PageHead(page), page);

	if (PageAnon(page))
		ttu_flags |= TTU_SPLIT_FREEZE;

	unmap_success = try_to_unmap(page, ttu_flags);
	VM_BUG_ON_PAGE(!unmap_success, page);
}

static void remap_page(struct page *page, unsigned int nr)
{
	int i;
	if (PageTransHuge(page)) {
		remove_migration_ptes(page, page, true);
	} else {
		for (i = 0; i < nr; i++)
			remove_migration_ptes(page + i, page + i, true);
	}
}

static void lru_add_page_tail(struct page *head, struct page *tail,
		struct lruvec *lruvec, struct list_head *list)
{
	VM_BUG_ON_PAGE(!PageHead(head), head);
	VM_BUG_ON_PAGE(PageCompound(tail), head);
	VM_BUG_ON_PAGE(PageLRU(tail), head);
	lockdep_assert_held(&lruvec->lru_lock);

	if (list) {
		/* page reclaim is reclaiming a huge page */
		VM_WARN_ON(PageLRU(head));
		get_page(tail);
		list_add_tail(&tail->lru, list);
	} else {
		/* head is still on lru (and we have it frozen) */
		VM_WARN_ON(!PageLRU(head));
		SetPageLRU(tail);
		list_add_tail(&tail->lru, &head->lru);
	}
}

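/*
 * Turn tail page number @tail of @head into an independent page: copy over
 * the relevant page flags, ->mapping and ->index, clear PageTail, unfreeze
 * its refcount and put it back on the LRU (or on @list).
 */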
static void __split_huge_page_tail(struct page *head, int tail,
		struct lruvec *lruvec, struct list_head *list)
{
	struct page *page_tail = head + tail;

	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);

	/*
	 * Clone page flags before unfreezing refcount.
	 *
	 * After successful get_page_unless_zero() might follow flags change,
	 * for example lock_page() which set PG_waiters.
	 */
	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	page_tail->flags |= (head->flags &
			((1L << PG_referenced) |
			 (1L << PG_swapbacked) |
			 (1L << PG_swapcache) |
			 (1L << PG_mlocked) |
			 (1L << PG_uptodate) |
			 (1L << PG_active) |
			 (1L << PG_workingset) |
			 (1L << PG_locked) |
			 (1L << PG_unevictable) |
#ifdef CONFIG_64BIT
			 (1L << PG_arch_2) |
#endif
			 (1L << PG_dirty)));

	/* ->mapping in first tail page is compound_mapcount */
	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
			page_tail);
	page_tail->mapping = head->mapping;
	page_tail->index = head->index + tail;

	/* Page flags must be visible before we make the page non-compound. */
	smp_wmb();

	/*
	 * Clear PageTail before unfreezing page refcount.
	 *
	 * After successful get_page_unless_zero() might follow put_page()
	 * which needs correct compound_head().
	 */
	clear_compound_head(page_tail);

	/* Finally unfreeze refcount. Additional reference from page cache. */
	page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
					  PageSwapCache(head)));

	if (page_is_young(head))
		set_page_young(page_tail);
	if (page_is_idle(head))
		set_page_idle(page_tail);

	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));

	/*
	 * always add to the tail because some iterators expect new
	 * pages to show after the currently processed elements - e.g.
	 * migrate_pages
	 */
	lru_add_page_tail(head, page_tail, lruvec, list);
}

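/*
 * The core of the split: with the head's refcount frozen and irqs disabled,
 * carve all tail pages out of the compound page, fix up the page cache or
 * swap cache entries, and finally unfreeze and remap everything.
 */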
static void __split_huge_page(struct page *page, struct list_head *list,
		pgoff_t end)
{
	struct page *head = compound_head(page);
	struct lruvec *lruvec;
	struct address_space *swap_cache = NULL;
	unsigned long offset = 0;
	unsigned int nr = thp_nr_pages(head);
	int i;

	/* complete memcg works before add pages to LRU */
	split_page_memcg(head, nr);

	if (PageAnon(head) && PageSwapCache(head)) {
		swp_entry_t entry = { .val = page_private(head) };

		offset = swp_offset(entry);
		swap_cache = swap_address_space(entry);
		xa_lock(&swap_cache->i_pages);
	}

	/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
	lruvec = lock_page_lruvec(head);

	for (i = nr - 1; i >= 1; i--) {
		__split_huge_page_tail(head, i, lruvec, list);
		/* Some pages can be beyond i_size: drop them from page cache */
		if (head[i].index >= end) {
			ClearPageDirty(head + i);
			__delete_from_page_cache(head + i, NULL);
			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
				shmem_uncharge(head->mapping->host, 1);
			put_page(head + i);
		} else if (!PageAnon(page)) {
			__xa_store(&head->mapping->i_pages, head[i].index,
					head + i, 0);
		} else if (swap_cache) {
			__xa_store(&swap_cache->i_pages, offset + i,
					head + i, 0);
		}
	}

	ClearPageCompound(head);
	unlock_page_lruvec(lruvec);
	/* Caller disabled irqs, so they are still disabled here */

	split_page_owner(head, nr);

	/* See comment in __split_huge_page_tail() */
	if (PageAnon(head)) {
		/* Additional pin to swap cache */
		if (PageSwapCache(head)) {
			page_ref_add(head, 2);
			xa_unlock(&swap_cache->i_pages);
		} else {
			page_ref_inc(head);
		}
	} else {
		/* Additional pin to page cache */
		page_ref_add(head, 2);
		xa_unlock(&head->mapping->i_pages);
	}
	local_irq_enable();

	remap_page(head, nr);

	if (PageSwapCache(head)) {
		swp_entry_t entry = { .val = page_private(head) };

		split_swap_cluster(entry);
	}

	for (i = 0; i < nr; i++) {
		struct page *subpage = head + i;
		if (subpage == page)
			continue;
		unlock_page(subpage);

		/*
		 * Subpages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(subpage);
	}
}

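/*
 * Return the total number of mappings of all subpages of @page, both via
 * the compound pmd mapping and via individual ptes.
 */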
int total_mapcount(struct page *page)
{
	int i, compound, nr, ret;

	VM_BUG_ON_PAGE(PageTail(page), page);

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) + 1;

	compound = compound_mapcount(page);
	nr = compound_nr(page);
	if (PageHuge(page))
		return compound;
	ret = compound;
	for (i = 0; i < nr; i++)
		ret += atomic_read(&page[i]._mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!PageAnon(page))
		return ret - compound * nr;
	if (PageDoubleMap(page))
		ret -= nr;
	return ret;
}

/*
 * This calculates accurately how many mappings a transparent hugepage
 * has (unlike page_mapcount() which isn't fully accurate). This full
 * accuracy is primarily needed to know if copy-on-write faults can
 * reuse the page and change the mapping to read-write instead of
 * copying them. At the same time this returns the total_mapcount too.
 *
 * The function returns the highest mapcount any one of the subpages
 * has. If the return value is one, even if different processes are
 * mapping different subpages of the transparent hugepage, they can
 * all reuse it, because each process is reusing a different subpage.
 *
 * The total_mapcount is instead counting all virtual mappings of the
 * subpages. If the total_mapcount is equal to "one", it tells the
 * caller all mappings belong to the same "mm" and in turn the
 * anon_vma of the transparent hugepage can become the vma->anon_vma
 * local one as no other process may be mapping any of the subpages.
 *
 * It would be more accurate to replace page_mapcount() with
 * page_trans_huge_mapcount(), however we only use
 * page_trans_huge_mapcount() in the copy-on-write faults where we
 * need full accuracy to avoid breaking page pinning, because
 * page_trans_huge_mapcount() is slower than page_mapcount().
 */
int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
{
	int i, ret, _total_mapcount, mapcount;

	/* hugetlbfs shouldn't call it */
	VM_BUG_ON_PAGE(PageHuge(page), page);

	if (likely(!PageTransCompound(page))) {
		mapcount = atomic_read(&page->_mapcount) + 1;
		if (total_mapcount)
			*total_mapcount = mapcount;
		return mapcount;
	}

	page = compound_head(page);

	_total_mapcount = ret = 0;
	for (i = 0; i < thp_nr_pages(page); i++) {
		mapcount = atomic_read(&page[i]._mapcount) + 1;
		ret = max(ret, mapcount);
		_total_mapcount += mapcount;
	}
	if (PageDoubleMap(page)) {
		ret -= 1;
		_total_mapcount -= thp_nr_pages(page);
	}
	mapcount = compound_mapcount(page);
	ret += mapcount;
	_total_mapcount += mapcount;
	if (total_mapcount)
		*total_mapcount = _total_mapcount;
	return ret;
}

/* Racy check whether the huge page can be split */
bool can_split_huge_page(struct page *page, int *pextra_pins)
{
	int extra_pins;

	/* Additional pins from page cache */
	if (PageAnon(page))
		extra_pins = PageSwapCache(page) ? thp_nr_pages(page) : 0;
	else
		extra_pins = thp_nr_pages(page);
	if (pextra_pins)
		*pextra_pins = extra_pins;
	return total_mapcount(page) == page_count(page) - extra_pins - 1;
}

/*
 * This function splits a huge page into normal pages. @page can point to any
 * subpage of the huge page to split. Split doesn't change the position of @page.
 *
 * Only the caller must hold a pin on the @page, otherwise split fails with -EBUSY.
 * The huge page must be locked.
 *
 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
 *
 * Both head page and tail pages will inherit mapping, flags, and so on from
 * the hugepage.
 *
 * GUP pin and PG_locked are transferred to @page. The rest of the subpages
 * can be freed if they are not mapped.
 *
 * Returns 0 if the hugepage is split successfully.
 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
 * us.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct page *head = compound_head(page);
	struct deferred_split *ds_queue = get_deferred_split_queue(head);
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;
	int count, mapcount, extra_pins, ret;
	pgoff_t end;

	VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
	VM_BUG_ON_PAGE(!PageLocked(head), head);
	VM_BUG_ON_PAGE(!PageCompound(head), head);

	if (PageWriteback(head))
		return -EBUSY;

	if (PageAnon(head)) {
		/*
		 * The caller does not necessarily hold an mmap_lock that would
		 * prevent the anon_vma from disappearing, so first we take a
		 * reference to it and then lock the anon_vma for write. This
		 * is similar to page_lock_anon_vma_read except the write lock
		 * is taken to serialise against parallel split or collapse
		 * operations.
		 */
		anon_vma = page_get_anon_vma(head);
		if (!anon_vma) {
			ret = -EBUSY;
			goto out;
		}
		end = -1;
		mapping = NULL;
		anon_vma_lock_write(anon_vma);
	} else {
		mapping = head->mapping;

		/* Truncated ? */
		if (!mapping) {
			ret = -EBUSY;
			goto out;
		}

		anon_vma = NULL;
		i_mmap_lock_read(mapping);

		/*
		 * __split_huge_page() may need to trim off pages beyond EOF:
		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
		 * which cannot be nested inside the page tree lock. So note
		 * end now: i_size itself may be changed at any moment, but
		 * head page lock is good enough to serialize the trimming.
		 */
		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
	}

	/*
	 * Racy check if we can split the page, before unmap_page() will
	 * split PMDs
	 */
	if (!can_split_huge_page(head, &extra_pins)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	unmap_page(head);
	VM_BUG_ON_PAGE(compound_mapcount(head), head);

	/* block interrupt reentry in xa_lock and spinlock */
	local_irq_disable();
	if (mapping) {
		XA_STATE(xas, &mapping->i_pages, page_index(head));

		/*
		 * Check if the head page is present in page cache.
		 * We assume all tails are present too, if head is there.
		 */
		xa_lock(&mapping->i_pages);
		if (xas_load(&xas) != head)
			goto fail;
	}

	/* Prevent deferred_split_scan() touching ->_refcount */
	spin_lock(&ds_queue->split_queue_lock);
	count = page_count(head);
	mapcount = total_mapcount(head);
	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
		if (!list_empty(page_deferred_list(head))) {
			ds_queue->split_queue_len--;
			list_del(page_deferred_list(head));
		}
		spin_unlock(&ds_queue->split_queue_lock);
		if (mapping) {
			int nr = thp_nr_pages(head);

			if (PageSwapBacked(head))
				__mod_lruvec_page_state(head, NR_SHMEM_THPS,
							-nr);
			else
				__mod_lruvec_page_state(head, NR_FILE_THPS,
							-nr);
		}

		__split_huge_page(page, list, end);
		ret = 0;
	} else {
		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
			pr_alert("total_mapcount: %u, page_count(): %u\n",
					mapcount, count);
			if (PageTail(page))
				dump_page(head, NULL);
			dump_page(page, "total_mapcount(head) > 0");
			BUG();
		}
		spin_unlock(&ds_queue->split_queue_lock);
fail:		if (mapping)
			xa_unlock(&mapping->i_pages);
		local_irq_enable();
		remap_page(head, thp_nr_pages(head));
		ret = -EBUSY;
	}

out_unlock:
	if (anon_vma) {
		anon_vma_unlock_write(anon_vma);
		put_anon_vma(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_read(mapping);
out:
	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
	return ret;
}

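/*
 * Destructor for THPs: make sure the page is off its deferred split queue
 * before handing it to free_compound_page().
 */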
void free_transhuge_page(struct page *page)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(page);
	unsigned long flags;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (!list_empty(page_deferred_list(page))) {
		ds_queue->split_queue_len--;
		list_del(page_deferred_list(page));
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
	free_compound_page(page);
}

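/*
 * Queue a partially unmapped THP on the per-node (or per-memcg) deferred
 * split queue, so the shrinker can split it and free the unused subpages
 * under memory pressure.
 */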
void deferred_split_huge_page(struct page *page)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(page);
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg = page_memcg(compound_head(page));
#endif
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageTransHuge(page), page);

	/*
	 * The try_to_unmap() in page reclaim path might reach here too,
	 * this may cause a race condition to corrupt deferred split queue.
	 * And, if page reclaim is already handling the same page, it is
	 * unnecessary to handle it again in shrinker.
	 *
	 * Check PageSwapCache to determine if the page is being
	 * handled by page reclaim since THP swap would add the page into
	 * swap cache before calling try_to_unmap().
	 */
	if (PageSwapCache(page))
		return;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (list_empty(page_deferred_list(page))) {
		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
		list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
		ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
		if (memcg)
			memcg_set_shrinker_bit(memcg, page_to_nid(page),
					       deferred_split_shrinker.id);
#endif
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}

static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif
	return READ_ONCE(ds_queue->split_queue_len);
}

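/*
 * Shrinker callback: pull up to sc->nr_to_scan pages off the deferred split
 * queue, try to split each of them, and requeue whatever could not be split.
 */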
static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
	unsigned long flags;
	LIST_HEAD(list), *pos, *next;
	struct page *page;
	int split = 0;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_safe(pos, next, &ds_queue->split_queue) {
		page = list_entry((void *)pos, struct page, mapping);
		page = compound_head(page);
		if (get_page_unless_zero(page)) {
			list_move(page_deferred_list(page), &list);
		} else {
			/* We lost race with put_compound_page() */
			list_del_init(page_deferred_list(page));
			ds_queue->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	list_for_each_safe(pos, next, &list) {
		page = list_entry((void *)pos, struct page, mapping);
		if (!trylock_page(page))
			goto next;
		/* split_huge_page() removes page from list on success */
		if (!split_huge_page(page))
			split++;
		unlock_page(page);
next:
		put_page(page);
	}

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	list_splice_tail(&list, &ds_queue->split_queue);
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	/*
	 * Stop shrinker if we didn't split any page, but the queue is empty.
	 * This can happen if pages were freed under us.
	 */
	if (!split && list_empty(&ds_queue->split_queue))
		return SHRINK_STOP;
	return split;
}

static struct shrinker deferred_split_shrinker = {
	.count_objects = deferred_split_count,
	.scan_objects = deferred_split_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
		 SHRINKER_NONSLAB,
};

#ifdef CONFIG_DEBUG_FS
static int split_huge_pages_set(void *data, u64 val)
{
	struct zone *zone;
	struct page *page;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	if (val != 1)
		return -EINVAL;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			if (!pfn_valid(pfn))
				continue;

			page = pfn_to_page(pfn);
			if (!get_page_unless_zero(page))
				continue;

			if (zone != page_zone(page))
				goto next;

			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
				goto next;

			total++;
			lock_page(page);
			if (!split_huge_page(page))
				split++;
			unlock_page(page);
next:
			put_page(page);
		}
	}

	pr_info("%lu of %lu THP split\n", split, total);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
		"%llu\n");

static int __init split_huge_pages_debugfs(void)
{
	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			    &split_huge_pages_fops);
	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
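/*
 * Replace a mapped huge pmd with a pmd migration entry, preserving the
 * dirty and soft-dirty state, and drop the page's rmap and reference.
 */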
void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	entry = make_migration_entry(page, pmd_write(pmdval));
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	page_remove_rmap(page, true);
	put_page(page);
}

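/*
 * Restore a huge pmd from its migration entry once migration has finished:
 * rebuild the pmd for the new page, re-add the rmap and update the MMU.
 */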
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_write_migration_entry(entry))
		pmde = maybe_pmd_mkwrite(pmde, vma);

	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
	if (PageAnon(new))
		page_add_anon_rmap(new, vma, mmun_start, true);
	else
		page_add_file_rmap(new, true);
	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
	if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
		mlock_vma_page(new);
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
#endif