// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
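
/*
 * Usage note: this test is built when CONFIG_DEBUG_VM_PGTABLE is enabled and
 * runs once from late_initcall(); any helper that violates the documented
 * semantics shows up as a WARN_ON() splat in the kernel log.
 */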
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/vm/arch_pgtable_helpers.rst for the semantics
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 */

#define VMFLAGS	(VM_READ|VM_WRITE|VM_EXEC)

/*
 * On s390 platform, the lower 4 bits are used to identify given page table
 * entry type. But these bits might affect the ability to clear entries with
 * pxx_clear() because of how dynamic page table folding works on s390. So
 * while loading up the entries do not change the lower 4 bits. It does not
 * affect any other platform. Also avoid the 62nd bit on ppc64 that is
 * used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
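/* Non-zero byte pattern used to memset() dummy entries in the basic tests */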
#define RANDOM_NZVALUE	GENMASK(7, 0)

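/*
 * Consolidated test arguments: the mm/vma being exercised, pointers into
 * each page table level for the chosen vaddr, the start-of-table pointers
 * saved for later freeing, and the pfns (allocated pages plus fixed pfns
 * derived from a kernel symbol) that the tests map at the various levels.
 */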
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte))));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte))));
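
	/*
	 * Write protecting an entry must not affect its dirty state:
	 * a clean pte must stay clean and a dirty pte must stay dirty.
	 */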
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

static void __init pte_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pte_t *ptep,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding the TLB flush.
	 * This requires that set_pte_at() is not used to update an already
	 * existing pte entry. Clear the pte before calling set_pte_at().
	 */

	pr_debug("Validating PTE advanced\n");
	pte = pfn_pte(pfn, prot);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_set_wrprotect(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(pfn, prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	pte = pte_mkwrite(pte);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(vma, vaddr, ptep, pte, 1);
	pte = ptep_get(ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(mm, vaddr, ptep, 1);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));

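	/*
	 * Exercise ptep_test_and_clear_young(): after clearing, the
	 * entry must no longer be young.
	 */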
	pte = pfn_pte(pfn, prot);
	pte = pte_mkyoung(pte);
	set_pte_at(mm, vaddr, ptep, pte);
	ptep_test_and_clear_young(vma, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(pte_young(pte));
}

static void __init pte_savedwrite_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE saved write\n");
	WARN_ON(!pte_savedwrite(pte_mk_savedwrite(pte_clear_savedwrite(pte))));
	WARN_ON(pte_savedwrite(pte_clear_savedwrite(pte_mk_savedwrite(pte))));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));


	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd))));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

	pgtable_trans_huge_deposit(mm, pmdp, pgtable);

	pmd = pfn_pmd(pfn, prot);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_set_wrprotect(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(pfn, prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmd = pmd_mkwrite(pmd);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(vma, vaddr, pmdp, pmd, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(vma, vaddr, pmdp, 1);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(mm, vaddr, pmdp, pmd);
	pmdp_test_and_clear_young(vma, vaddr, pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pmd entry and withdraw the deposited page table */
	pmdp_huge_get_and_clear(mm, vaddr, pmdp);
	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD saved write\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none);
	WARN_ON(!pmd_savedwrite(pmd_mk_savedwrite(pmd_clear_savedwrite(pmd))));
	WARN_ON(pmd_savedwrite(pmd_clear_savedwrite(pmd_mk_savedwrite(pmd))));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = protection_map[idx];
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that protection_map[idx]
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * that the dirty bit is set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to a next level page table
	 * entry. Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(pfn, prot);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_set_wrprotect(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(mm, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(pfn, prot);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(vma, vaddr, pudp, pud, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(mm, vaddr, pudp, 1);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(pfn, prot);
	pud = pud_mkyoung(pud);
	set_pud_at(mm, vaddr, pudp, pud);
	pudp_test_and_clear_young(vma, vaddr, pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(mm, vaddr, pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pmd_t *pmdp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot, pgtable_t pgtable)
{
}
static void __init pud_advanced_tests(struct mm_struct *mm,
				      struct vm_area_struct *vma, pud_t *pudp,
				      unsigned long pfn, unsigned long vaddr,
				      pgprot_t prot)
{
}
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pmd_savedwrite_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(prot))
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pmd_clear_huge(pmdp));
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(prot))
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*pudp, __pud(0));
	WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
	WARN_ON(!pud_clear_huge(pudp));
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp)
{
	pud_t pud = READ_ONCE(*pudp);

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD clear\n");
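	/*
	 * Seed the entry with garbage bits (RANDOM_ORVALUE avoids the
	 * arch-reserved bits) so that pud_clear() is shown to actually
	 * wipe a populated entry.
	 */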
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*pudp, pud);
	pud_clear(pudp);
	pud = READ_ONCE(*pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
	pud_t pud;

	if (mm_pmd_folded(mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(mm, pudp, pmdp);
	pud = READ_ONCE(*pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct mm_struct *mm, pud_t *pudp) { }
static void __init pud_populate_tests(struct mm_struct *mm, pud_t *pudp,
				      pmd_t *pmdp)
{
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*p4dp, p4d);
	p4d_clear(p4dp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
	p4d_t p4d;

	if (mm_pud_folded(mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(pudp);
	p4d_clear(p4dp);
	p4d_populate(mm, p4dp, pudp);
	p4d = READ_ONCE(*p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = READ_ONCE(*pgdp);

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pgdp, pgd);
	pgd_clear(pgdp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
	pgd_t pgd;

	if (mm_p4d_folded(mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(p4dp);
	pgd_clear(pgdp);
	pgd_populate(mm, pgdp, p4dp);
	pgd = READ_ONCE(*pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct mm_struct *mm, p4d_t *p4dp) { }
static void __init pgd_clear_tests(struct mm_struct *mm, pgd_t *pgdp) { }
static void __init p4d_populate_tests(struct mm_struct *mm, p4d_t *p4dp,
				      pud_t *pudp)
{
}
static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
				      p4d_t *p4dp)
{
}
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
				   unsigned long pfn, unsigned long vaddr,
				   pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	pr_debug("Validating PTE clear\n");
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(mm, vaddr, ptep, pte);
	barrier();
	pte_clear(mm, vaddr, ptep);
	pte = ptep_get(ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct mm_struct *mm, pmd_t *pmdp)
{
	pmd_t pmd = READ_ONCE(*pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*pmdp, pmd);
	pmd_clear(pmdp);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct mm_struct *mm, pmd_t *pmdp,
				      pgtable_t pgtable)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(mm, pmdp, pgtable);
	pmd = READ_ONCE(*pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pte_t pte = pfn_pte(pfn, prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
		!IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pmd_swap_soft_dirty_tests(unsigned long pfn, pgprot_t prot)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __init pte_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
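	/*
	 * Round trip the entry through __pte_to_swp_entry() and
	 * __swp_entry_to_pte() and verify that the pfn survives.
	 */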
	pte = pfn_pte(pfn, prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(pfn, prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(void)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	pr_debug("Validating swap migration\n");
	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking the
	 * page that actually maps kernel text ('start_kernel') can be really
	 * problematic. Let's allocate a dedicated page explicitly for this
	 * purpose that will be freed subsequently.
	 */
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		pr_err("page allocation failed\n");
		return;
	}

	/*
	 * make_migration_entry() expects given page to be
	 * locked, otherwise it stumbles upon a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
	__free_page(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here,
	 * as it was previously derived from a real kernel symbol.
	 */
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(pte_mkhuge(pte)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non transhuge pmds (i.e. there
	 * are no THP allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(pfn, prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot)
{
	pud_t pud;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(pfn, prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable
	 * these tests when it comes back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(unsigned long pfn, pgprot_t prot) { }
static void __init pud_thp_tests(unsigned long pfn, pgprot_t prot) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

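/*
 * Pick a random, page aligned user virtual address in the range
 * [FIRST_USER_ADDRESS, TASK_SIZE).
 */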
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

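/*
 * Undo everything init_args() and the tests set up: free any (huge) pages
 * handed to the tests, release the saved page table pages and drop the
 * vma and mm.
 */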
static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_pages(page, 0);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}

static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
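	/*
	 * Orders at or above MAX_ORDER cannot be served by the buddy
	 * allocator, so try a contiguous range allocation for them first.
	 */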
	if (order >= MAX_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order < MAX_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	phys_addr_t phys;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * __P000 (or even __S000) will help create page table entries with
	 * PROT_NONE permission as required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr              = get_random_vaddr();
	args->page_prot          = vm_get_page_prot(VMFLAGS);
	args->page_prot_none     = __P000;
	args->is_contiguous_page = false;
	args->pud_pfn            = ULONG_MAX;
	args->pmd_pfn            = ULONG_MAX;
	args->pte_pfn            = ULONG_MAX;
	args->fixed_pgd_pfn      = ULONG_MAX;
	args->fixed_p4d_pfn      = ULONG_MAX;
	args->fixed_pud_pfn      = ULONG_MAX;
	args->fixed_pmd_pfn      = ULONG_MAX;
	args->fixed_pte_pfn      = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
	WARN_ON(!args->start_ptep);

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid the need for large memory block allocations for mapping
	 * at higher page table levels in some of the tests.
	 */
	phys = __pa_symbol(&start_kernel);
	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_pages(GFP_KERNEL, 0);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	pgd_t *pgdp;
	p4d_t *p4dp, *saved_p4dp;
	pud_t *pudp, *saved_pudp;
	pmd_t *pmdp, *saved_pmdp, pmd;
	pte_t *ptep;
	pgtable_t saved_ptep;
	pgprot_t prot;
	phys_addr_t paddr;
	unsigned long vaddr, pte_aligned, pmd_aligned;
	unsigned long pud_aligned;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	prot = vm_get_page_prot(VMFLAGS);
	vaddr = get_random_vaddr();
	mm = mm_alloc();
	if (!mm) {
		pr_err("mm_struct allocation failed\n");
		return 1;
	}

	vma = vm_area_alloc(mm);
	if (!vma) {
		pr_err("vma allocation failed\n");
		return 1;
	}

	/*
	 * PFN for mapping at PTE level is determined from a standard kernel
	 * text symbol. But pfns for higher page table levels are derived by
	 * masking lower bits of this real pfn. These derived pfns might not
	 * exist on the platform but that does not really matter as pfn_pxx()
	 * helpers will still create appropriate entries for the test. This
	 * helps avoid the need for large memory block allocations for mapping
	 * at higher page table levels.
	 */
	paddr = __pa_symbol(&start_kernel);

	pte_aligned = (paddr & PAGE_MASK) >> PAGE_SHIFT;
	pmd_aligned = (paddr & PMD_MASK) >> PAGE_SHIFT;
	pud_aligned = (paddr & PUD_MASK) >> PAGE_SHIFT;
	WARN_ON(!pfn_valid(pte_aligned));

	pgdp = pgd_offset(mm, vaddr);
	p4dp = p4d_alloc(mm, pgdp, vaddr);
	pudp = pud_alloc(mm, p4dp, vaddr);
	pmdp = pmd_alloc(mm, pudp, vaddr);
	/*
	 * Allocate pgtable_t
	 */
	if (pte_alloc(mm, pmdp)) {
		pr_err("pgtable allocation failed\n");
		return 1;
	}

	/*
	 * Save all the page table page addresses as the page table
	 * entries will be used for testing with random or garbage
	 * values. These saved addresses will be used for freeing
	 * page table pages.
	 */
	pmd = READ_ONCE(*pmdp);
	saved_p4dp = p4d_offset(pgdp, 0UL);
	saved_pudp = pud_offset(p4dp, 0UL);
	saved_pmdp = pmd_offset(pudp, 0UL);
	saved_ptep = pmd_pgtable(pmd);

	/*
	 * Iterate over the protection_map[] to make sure that all
	 * the basic page table transformation validations just hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 */
	for (idx = 0; idx < ARRAY_SIZE(protection_map); idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both the P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of the
	 * above iteration for now to save some test execution time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_savedwrite_tests(&args);
	pmd_savedwrite_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_devmap_tests(&args);
	pmd_devmap_tests(&args);
	pud_devmap_tests(&args);

	pte_soft_dirty_tests(pte_aligned, prot);
	pmd_soft_dirty_tests(pmd_aligned, prot);
	pte_swap_soft_dirty_tests(pte_aligned, prot);
	pmd_swap_soft_dirty_tests(pmd_aligned, prot);

	pte_swap_tests(pte_aligned, prot);
	pmd_swap_tests(pmd_aligned, prot);

	swap_migration_tests();

	pmd_thp_tests(pmd_aligned, prot);
	pud_thp_tests(pud_aligned, prot);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold
	 * proper page table lock.
	 */

	ptep = pte_offset_map_lock(mm, pmdp, vaddr, &ptl);
	pte_clear_tests(mm, ptep, pte_aligned, vaddr, prot);
	pte_advanced_tests(mm, vma, ptep, pte_aligned, vaddr, prot);
	pte_unmap_unlock(ptep, ptl);

	ptl = pmd_lock(mm, pmdp);
	pmd_clear_tests(mm, pmdp);
	pmd_advanced_tests(mm, vma, pmdp, pmd_aligned, vaddr, prot, saved_ptep);
	pmd_huge_tests(pmdp, pmd_aligned, prot);
	pmd_populate_tests(mm, pmdp, saved_ptep);
	spin_unlock(ptl);

	ptl = pud_lock(mm, pudp);
	pud_clear_tests(mm, pudp);
	pud_advanced_tests(mm, vma, pudp, pud_aligned, vaddr, prot);
	pud_huge_tests(pudp, pud_aligned, prot);
	pud_populate_tests(mm, pudp, saved_pmdp);
	spin_unlock(ptl);

	spin_lock(&mm->page_table_lock);
	p4d_clear_tests(mm, p4dp);
	pgd_clear_tests(mm, pgdp);
	p4d_populate_tests(mm, p4dp, saved_pudp);
	pgd_populate_tests(mm, pgdp, saved_p4dp);
	spin_unlock(&mm->page_table_lock);

	p4d_free(mm, saved_p4dp);
	pud_free(mm, saved_pudp);
	pmd_free(mm, saved_pmdp);
	pte_free(mm, saved_ptep);

	vm_area_free(vma);
	mm_dec_nr_puds(mm);
	mm_dec_nr_pmds(mm);
	mm_dec_nr_ptes(mm);
	mmdrop(mm);

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);