// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/libnvdimm.h>

#include <asm/e820/api.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/memtype.h>
#include <asm/set_memory.h>

#include "../mm_internal.h"

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgd_t		*pgd;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	unsigned long	numpages;
	unsigned long	curpage;
	unsigned long	pfn;
	unsigned int	flags;
	unsigned int	force_split		: 1,
			force_static_prot	: 1;
	struct page	**pages;
};

enum cpa_warn {
	CPA_CONFLICT,
	CPA_PROTECT,
	CPA_DETECT,
};

static const int cpa_warn_level = CPA_PROTECT;

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
 * using cpa_lock. So that we don't allow any other cpu, with stale large tlb
 * entries change the page attribute in parallel to some other cpu
 * splitting a large page entry along with changing the attribute.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
	if (direct_pages_count[level] == 0)
		return;

	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_CPA_STATISTICS

static unsigned long cpa_1g_checked;
static unsigned long cpa_1g_sameprot;
static unsigned long cpa_1g_preserved;
static unsigned long cpa_2m_checked;
static unsigned long cpa_2m_sameprot;
static unsigned long cpa_2m_preserved;
static unsigned long cpa_4k_install;

static inline void cpa_inc_1g_checked(void)
{
	cpa_1g_checked++;
}

static inline void cpa_inc_2m_checked(void)
{
	cpa_2m_checked++;
}

static inline void cpa_inc_4k_install(void)
{
	cpa_4k_install++;
}

static inline void cpa_inc_lp_sameprot(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_sameprot++;
	else
		cpa_2m_sameprot++;
}

static inline void cpa_inc_lp_preserved(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_preserved++;
	else
		cpa_2m_preserved++;
}

static int cpastats_show(struct seq_file *m, void *p)
{
	seq_printf(m, "1G pages checked:     %16lu\n", cpa_1g_checked);
	seq_printf(m, "1G pages sameprot:    %16lu\n", cpa_1g_sameprot);
	seq_printf(m, "1G pages preserved:   %16lu\n", cpa_1g_preserved);
	seq_printf(m, "2M pages checked:     %16lu\n", cpa_2m_checked);
	seq_printf(m, "2M pages sameprot:    %16lu\n", cpa_2m_sameprot);
	seq_printf(m, "2M pages preserved:   %16lu\n", cpa_2m_preserved);
	seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
	return 0;
}

static int cpastats_open(struct inode *inode, struct file *file)
{
	return single_open(file, cpastats_show, NULL);
}

static const struct file_operations cpastats_fops = {
	.open		= cpastats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init cpa_stats_init(void)
{
	debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
			    &cpastats_fops);
	return 0;
}
late_initcall(cpa_stats_init);
#else
static inline void cpa_inc_1g_checked(void) { }
static inline void cpa_inc_2m_checked(void) { }
static inline void cpa_inc_4k_install(void) { }
static inline void cpa_inc_lp_sameprot(int level) { }
static inline void cpa_inc_lp_preserved(int level) { }
#endif


static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr <= end;
}

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	/* Do not reference physical address outside the kernel. */
	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/*
	 * Kernel text has an alias mapping at a high address, known
	 * here as "highmap".
	 */
	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
}

#else

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/* There is no highmap on 32-bit */
	return false;
}
#endif

/*
 * See set_mce_nospec().
 *
 * Machine check recovery code needs to change cache mode of poisoned pages to
 * UC to avoid speculative access logging another error. But passing the
 * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
 * speculative access. So we cheat and flip the top bit of the address. This
 * works fine for the code that updates the page tables. But at the end of the
 * process we need to flush the TLB and cache and the non-canonical address
 * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
 *
 * But in the common case we already have a canonical address. This code
 * will fix the top bit if needed and is a no-op otherwise.
 */
static inline unsigned long fix_addr(unsigned long addr)
{
#ifdef CONFIG_X86_64
	return (long)(addr << 1) >> 1;
#else
	return addr;
#endif
}
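
/*
 * Added illustrative note (not part of the upstream file): a worked example
 * of the shift trick above, for a hypothetical direct-map address whose top
 * bit was flipped by set_mce_nospec():
 *
 *	addr            == 0x7fff888000000000	(0xffff888000000000 ^ BIT(63))
 *	addr << 1       == 0xffff110000000000	(bit 63 discarded)
 *	arithmetic >> 1 == 0xffff888000000000	(bit 62 sign-extended into bit 63)
 *
 * For an address that is already canonical, bit 63 equals bit 62, so the
 * shift pair is a no-op.
 */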

static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
{
	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[idx];

		if (unlikely(PageHighMem(page)))
			return 0;

		return (unsigned long)page_address(page);
	}

	if (cpa->flags & CPA_ARRAY)
		return cpa->vaddr[idx];

	return *cpa->vaddr + idx * PAGE_SIZE;
}

/*
 * Flushing functions
 */

static void clflush_cache_range_opt(void *vaddr, unsigned int size)
{
	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
	void *vend = vaddr + size;

	if (p >= vend)
		return;

	for (; p < vend; p += clflush_size)
		clflushopt(p);
}

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
 * SFENCE to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	mb();
	clflush_cache_range_opt(vaddr, size);
	mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
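
/*
 * Added usage sketch (not part of the upstream file; "buf", "data" and
 * "len" are hypothetical): flush a range that was just written through a
 * cacheable mapping so the data becomes visible beyond the CPU caches.
 *
 *	memcpy(buf, data, len);
 *	clflush_cache_range(buf, len);
 *
 * The mb() calls inside clflush_cache_range() order the CLFLUSHOPTs against
 * the surrounding stores, so no extra fencing is needed by the caller.
 */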

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_invalidate_pmem(void *addr, size_t size)
{
	clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_tlb(void *data)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	for (i = 0; i < cpa->numpages; i++)
		__flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}

static void cpa_flush(struct cpa_data *data, int cache)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		cpa_flush_all(cache);
		return;
	}

	if (cpa->numpages <= tlb_single_page_flush_ceiling)
		on_each_cpu(__cpa_flush_tlb, cpa, 1);
	else
		flush_tlb_all();

	if (!cache)
		return;

	mb();
	for (i = 0; i < cpa->numpages; i++) {
		unsigned long addr = __cpa_addr(cpa, i);
		unsigned int level;

		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
	}
	mb();
}

static bool overlaps(unsigned long r1_start, unsigned long r1_end,
		     unsigned long r2_start, unsigned long r2_end)
{
	return (r1_start <= r2_end && r1_end >= r2_start) ||
		(r2_start <= r1_end && r2_end >= r1_start);
}

#ifdef CONFIG_PCI_BIOS
/*
 * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS
 * based config access (CONFIG_PCI_GOBIOS) support.
 */
#define BIOS_PFN	PFN_DOWN(BIOS_BEGIN)
#define BIOS_PFN_END	PFN_DOWN(BIOS_END - 1)

static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
		return _PAGE_NX;
	return 0;
}
#else
static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	return 0;
}
#endif

/*
 * The .rodata section needs to be read-only. Using the pfn catches all
 * aliases.  This also includes __ro_after_init, so do not enforce until
 * kernel_set_to_readonly is true.
 */
static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
{
	unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));

	/*
	 * Note: __end_rodata is page aligned and not inclusive, so
	 * subtract 1 to get the last enforced PFN in the rodata area.
	 */
	epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;

	if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
		return _PAGE_RW;
	return 0;
}

/*
 * Protect kernel text against becoming non executable by forbidding
 * _PAGE_NX.  This protects only the high kernel mapping (_text -> _etext)
 * out of which the kernel actually executes.  Do not protect the low
 * mapping.
 *
 * This does not cover __inittext since that is gone after boot.
 */
static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
{
	unsigned long t_end = (unsigned long)_etext - 1;
	unsigned long t_start = (unsigned long)_text;

	if (overlaps(start, end, t_start, t_end))
		return _PAGE_NX;
	return 0;
}

#if defined(CONFIG_X86_64)
/*
 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
 * kernel text mappings for the large page aligned text, rodata sections
 * will be always read-only. For the kernel identity mappings covering the
 * holes caused by this alignment can be anything that user asks.
 *
 * This will preserve the large page mappings for kernel text/data at no
 * extra cost.
 */
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1;
	unsigned long t_start = (unsigned long)_text;
	unsigned int level;

	if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end))
		return 0;
	/*
	 * Don't enforce the !RW mapping for the kernel text mapping, if
	 * the current mapping is already using small page mapping.  No
	 * need to work hard to preserve large page mappings in this case.
	 *
	 * This also fixes the Linux Xen paravirt guest boot failure caused
	 * by unexpected read-only mappings for kernel identity
	 * mappings. In this paravirt guest case, the kernel text mapping
	 * and the kernel identity mapping share the same page-table pages,
	 * so the protections for kernel text and identity mappings have to
	 * be the same.
	 */
	if (lookup_address(start, &level) && (level != PG_LEVEL_4K))
		return _PAGE_RW;
	return 0;
}
#else
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	return 0;
}
#endif

static inline bool conflicts(pgprot_t prot, pgprotval_t val)
{
	return (pgprot_val(prot) & ~val) != pgprot_val(prot);
}

static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
				  unsigned long start, unsigned long end,
				  unsigned long pfn, const char *txt)
{
	static const char *lvltxt[] = {
		[CPA_CONFLICT]	= "conflict",
		[CPA_PROTECT]	= "protect",
		[CPA_DETECT]	= "detect",
	};

	if (warnlvl > cpa_warn_level || !conflicts(prot, val))
		return;

	pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
		lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
		(unsigned long long)val);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
					  unsigned long pfn, unsigned long npg,
					  unsigned long lpsize, int warnlvl)
{
	pgprotval_t forbidden, res;
	unsigned long end;

	/*
	 * There is no point in checking RW/NX conflicts when the requested
	 * mapping is setting the page !PRESENT.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		return prot;

	/* Operate on the virtual address */
	end = start + npg * PAGE_SIZE - 1;

	res = protect_kernel_text(start, end);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
	forbidden = res;

	/*
	 * Special case to preserve a large page. If the change spans the
	 * full large page mapping then there is no point to split it
	 * up. Happens with ftrace and is going to be removed once ftrace
	 * switched to text_poke().
	 */
	if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
		res = protect_kernel_text_ro(start, end);
		check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
		forbidden |= res;
	}

	/* Check the PFN directly */
	res = protect_pci_bios(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
	forbidden |= res;

	res = protect_rodata(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
	forbidden |= res;

	return __pgprot(pgprot_val(prot) & ~forbidden);
}
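
/*
 * Added illustrative note (not part of the upstream file): for example, a
 * set_memory_rw() request covering a pfn inside the kernel's rodata section
 * arrives here with _PAGE_RW set in @prot; protect_rodata() reports
 * _PAGE_RW as forbidden, so the returned pgprot has RW stripped and the
 * page stays read-only (printing a "Rodata RO" warning when warnlvl
 * permits).
 */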

/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
			     unsigned int *level)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;

	*level = PG_LEVEL_512G;
	if (p4d_large(*p4d) || !p4d_present(*p4d))
		return (pte_t *)p4d;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);
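
/*
 * Added usage sketch (not part of the upstream file; error handling
 * elided): check both the returned pointer and the level, since for large
 * mappings the returned entry is really a PMD or PUD.
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_val(*pte) & _PAGE_PRESENT) && level == PG_LEVEL_4K)
 *		... operate on the 4K PTE ...
 */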

/*
 * Lookup the page table entry for a virtual address in a given mm. Return a
 * pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
			    unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset(mm, address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address_in_mm);

static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
				  unsigned int *level)
{
	if (cpa->pgd)
		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
					       address, level);

	return lookup_address(address, level);
}

/*
 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
 * or NULL if not present.
 */
pmd_t *lookup_pmd_address(unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
		return NULL;

	return pmd_offset(pud, address);
}

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems.  The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
	unsigned long virt_addr = (unsigned long)__virt_addr;
	phys_addr_t phys_addr;
	unsigned long offset;
	enum pg_level level;
	pte_t *pte;

	pte = lookup_address(virt_addr, &level);
	BUG_ON(!pte);

	/*
	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
	 * make 32-PAE kernel work correctly.
	 */
	switch (level) {
	case PG_LEVEL_1G:
		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PUD_PAGE_MASK;
		break;
	case PG_LEVEL_2M:
		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PMD_PAGE_MASK;
		break;
	default:
		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
		offset = virt_addr & ~PAGE_MASK;
	}

	return (phys_addr_t)(phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);
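
/*
 * Added usage sketch (not part of the upstream file; "vaddr" is
 * hypothetical): for a vmalloc()'d buffer, __pa()/virt_to_phys() must not
 * be used because the address lies outside the linear mapping, whereas
 * slow_virt_to_phys() walks the page tables and handles any mapped kernel
 * address:
 *
 *	void *vaddr = vmalloc(PAGE_SIZE);
 *	phys_addr_t pa = slow_virt_to_phys(vaddr);
 */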

I
Ingo Molnar 已提交
715 716 717
/*
 * Set the new pmd in all the pgds we know about:
 */
I
Ingo Molnar 已提交
718
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
719 720 721
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
722
#ifdef CONFIG_X86_32
723
	if (!SHARED_KERNEL_PMD) {
724 725
		struct page *page;

726
		list_for_each_entry(page, &pgd_list, lru) {
727
			pgd_t *pgd;
728
			p4d_t *p4d;
729 730 731 732
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
733 734
			p4d = p4d_offset(pgd, address);
			pud = pud_offset(p4d, address);
735 736 737
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
L
Linus Torvalds 已提交
738
	}
739
#endif
L
Linus Torvalds 已提交
740 741
}

static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
{
	/*
	 * _PAGE_GLOBAL means "global page" for present PTEs.
	 * But, it is also used to indicate _PAGE_PROTNONE
	 * for non-present PTEs.
	 *
	 * This ensures that a _PAGE_GLOBAL PTE going from
	 * present to non-present is not confused as
	 * _PAGE_PROTNONE.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		pgprot_val(prot) &= ~_PAGE_GLOBAL;

	return prot;
}

759 760
static int __should_split_large_page(pte_t *kpte, unsigned long address,
				     struct cpa_data *cpa)
761
{
762
	unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
763
	pgprot_t old_prot, new_prot, req_prot, chk_prot;
764
	pte_t new_pte, *tmp;
765
	enum pg_level level;
766 767 768 769 770

	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
771
	tmp = _lookup_address_cpa(cpa, address, &level);
772
	if (tmp != kpte)
773
		return 1;
774 775 776

	switch (level) {
	case PG_LEVEL_2M:
777 778
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		old_pfn = pmd_pfn(*(pmd_t *)kpte);
779
		cpa_inc_2m_checked();
780
		break;
781
	case PG_LEVEL_1G:
782 783
		old_prot = pud_pgprot(*(pud_t *)kpte);
		old_pfn = pud_pfn(*(pud_t *)kpte);
784
		cpa_inc_1g_checked();
785
		break;
786
	default:
787
		return -EINVAL;
788 789
	}

790 791 792
	psize = page_level_size(level);
	pmask = page_level_mask(level);

793 794 795 796
	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
797 798
	lpaddr = (address + psize) & pmask;
	numpages = (lpaddr - address) >> PAGE_SHIFT;
799 800
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;
801 802 803

	/*
	 * We are safe now. Check whether the new pgprot is the same:
804 805
	 * Convert protection attributes to 4k-format, as cpa->mask* are set
	 * up accordingly.
806
	 */
807

808
	/* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
809
	req_prot = pgprot_large_2_4k(old_prot);
810

811 812
	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
T
Thomas Gleixner 已提交
813

814 815 816 817 818 819
	/*
	 * req_prot is in format of 4k pages. It must be converted to large
	 * page format: the caching mode includes the PAT bit located at
	 * different bit positions in the two formats.
	 */
	req_prot = pgprot_4k_2_large(req_prot);
820
	req_prot = pgprot_clear_protnone_bits(req_prot);
821
	if (pgprot_val(req_prot) & _PAGE_PRESENT)
822
		pgprot_val(req_prot) |= _PAGE_PSE;
823

T
Thomas Gleixner 已提交
824
	/*
825 826
	 * old_pfn points to the large page base pfn. So we need to add the
	 * offset of the virtual address:
T
Thomas Gleixner 已提交
827
	 */
828
	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
T
Thomas Gleixner 已提交
829 830
	cpa->pfn = pfn;

831 832 833 834 835 836
	/*
	 * Calculate the large page base address and the number of 4K pages
	 * in the large page
	 */
	lpaddr = address & pmask;
	numpages = psize >> PAGE_SHIFT;
837

838 839 840 841 842 843
	/*
	 * Sanity check that the existing mapping is correct versus the static
	 * protections. static_protections() guards against !PRESENT, so no
	 * extra conditional required here.
	 */
	chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
844
				      psize, CPA_CONFLICT);

	if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
		/*
		 * Split the large page and tell the split code to
		 * enforce static protections.
		 */
		cpa->force_static_prot = 1;
		return 1;
	}

	/*
	 * Optimization: If the requested pgprot is the same as the current
	 * pgprot, then the large page can be preserved and no updates are
	 * required independent of alignment and length of the requested
	 * range. The above already established that the current pgprot is
	 * correct, which in consequence makes the requested pgprot correct
	 * as well if it is the same. The static protection scan below will
	 * not come to a different conclusion.
	 */
	if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
		cpa_inc_lp_sameprot(level);
		return 0;
	}

869
	/*
870
	 * If the requested range does not cover the full page, split it up
871
	 */
872 873
	if (address != lpaddr || cpa->numpages != numpages)
		return 1;
874 875

	/*
876 877
	 * Check whether the requested pgprot is conflicting with a static
	 * protection requirement in the large page.
878
	 */
879
	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
880
				      psize, CPA_DETECT);
881 882

	/*
883 884 885 886 887 888 889
	 * If there is a conflict, split the large page.
	 *
	 * There used to be a 4k wise evaluation trying really hard to
	 * preserve the large pages, but experimentation has shown, that this
	 * does not help at all. There might be corner cases which would
	 * preserve one large page occasionally, but it's really not worth the
	 * extra code and cycles for the common case.
890
	 */
891
	if (pgprot_val(req_prot) != pgprot_val(new_prot))
892 893 894 895 896 897
		return 1;

	/* All checks passed. Update the large page mapping. */
	new_pte = pfn_pte(old_pfn, new_prot);
	__set_pmd_pte(kpte, address, new_pte);
	cpa->flags |= CPA_FLUSHTLB;
898
	cpa_inc_lp_preserved(level);
	return 0;
}

static int should_split_large_page(pte_t *kpte, unsigned long address,
				   struct cpa_data *cpa)
{
	int do_split;

	if (cpa->force_split)
		return 1;
909

910 911
	spin_lock(&pgd_lock);
	do_split = __should_split_large_page(kpte, address, cpa);
A
Andrea Arcangeli 已提交
912
	spin_unlock(&pgd_lock);
I
Ingo Molnar 已提交
913

I
Ingo Molnar 已提交
914
	return do_split;
915 916
}

static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
			  pgprot_t ref_prot, unsigned long address,
			  unsigned long size)
{
	unsigned int npg = PFN_DOWN(size);
	pgprot_t prot;

	/*
	 * If should_split_large_page() discovered an inconsistent mapping,
	 * remove the invalid protection in the split mapping.
	 */
	if (!cpa->force_static_prot)
		goto set;

931 932
	/* Hand in lpsize = 0 to enforce the protection mechanism */
	prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);

	if (pgprot_val(prot) == pgprot_val(ref_prot))
		goto set;

	/*
	 * If this is splitting a PMD, fix it up. PUD splits cannot be
	 * fixed trivially as that would require to rescan the newly
	 * installed PMD mappings after returning from split_large_page()
	 * so an eventual further split can allocate the necessary PTE
	 * pages. Warn for now and revisit it in case this actually
	 * happens.
	 */
	if (size == PAGE_SIZE)
		ref_prot = prot;
	else
		pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
set:
	set_pte(pte, pfn_pte(pfn, ref_prot));
}

953
static int
954 955
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
		   struct page *base)
956
{
957
	unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
958
	pte_t *pbase = (pte_t *)page_address(base);
I
Ingo Molnar 已提交
959 960
	unsigned int i, level;
	pgprot_t ref_prot;
961
	pte_t *tmp;
962

A
Andrea Arcangeli 已提交
963
	spin_lock(&pgd_lock);
964 965 966 967
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
968
	tmp = _lookup_address_cpa(cpa, address, &level);
969 970 971 972
	if (tmp != kpte) {
		spin_unlock(&pgd_lock);
		return 1;
	}
973

974
	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
975

976 977 978
	switch (level) {
	case PG_LEVEL_2M:
		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
979 980 981 982
		/*
		 * Clear PSE (aka _PAGE_PAT) and move
		 * PAT bit to correct position.
		 */
983
		ref_prot = pgprot_large_2_4k(ref_prot);
984
		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
985 986
		lpaddr = address & PMD_MASK;
		lpinc = PAGE_SIZE;
987
		break;
988

989 990 991
	case PG_LEVEL_1G:
		ref_prot = pud_pgprot(*(pud_t *)kpte);
		ref_pfn = pud_pfn(*(pud_t *)kpte);
992
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
993 994
		lpaddr = address & PUD_MASK;
		lpinc = PMD_SIZE;
995
		/*
996
		 * Clear the PSE flags if the PRESENT flag is not set
997 998 999
		 * otherwise pmd_present/pmd_huge will return true
		 * even on a non present pmd.
		 */
1000
		if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
1001
			pgprot_val(ref_prot) &= ~_PAGE_PSE;
1002 1003 1004 1005 1006
		break;

	default:
		spin_unlock(&pgd_lock);
		return 1;
1007 1008
	}

1009
	ref_prot = pgprot_clear_protnone_bits(ref_prot);
1010

1011 1012 1013
	/*
	 * Get the target pfn from the original entry:
	 */
1014
	pfn = ref_pfn;
1015 1016
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
		split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
1017

1018 1019 1020 1021 1022 1023
	if (virt_addr_valid(address)) {
		unsigned long pfn = PFN_DOWN(__pa(address));

		if (pfn_range_is_mapped(pfn, pfn + 1))
			split_page_count(level);
	}
1024

1025
	/*
1026
	 * Install the new, split up pagetable.
1027
	 *
1028 1029 1030
	 * We use the standard kernel pagetable protections for the new
	 * pagetable protections, the actual ptes set above control the
	 * primary protection behavior:
1031
	 */
1032
	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
1033 1034

	/*
1035 1036
	 * Do a global flush tlb after splitting the large page
	 * and before we do the actual change page attribute in the PTE.
1037
	 *
	 * Without this, we violate the TLB application note, that says:
	 * "The TLBs may contain both ordinary and large-page
	 *  translations for a 4-KByte range of linear addresses. This
	 *  may occur if software modifies the paging structures so that
	 *  the page size used for the address range changes. If the two
	 *  translations differ with respect to page frame or attributes
	 *  (e.g., permissions), processor behavior is undefined and may
	 *  be implementation-specific."
	 *
	 * We do this global tlb flush inside the cpa_lock, so that we
	 * don't allow any other cpu, with stale tlb entries change the
	 * page attribute in parallel, that also falls into the
	 * just split large page entry.
1051
	 */
1052
	flush_tlb_all();
1053
	spin_unlock(&pgd_lock);
1054

1055 1056
	return 0;
}
1057

1058 1059
static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
			    unsigned long address)
1060 1061 1062
{
	struct page *base;

1063
	if (!debug_pagealloc_enabled())
1064
		spin_unlock(&cpa_lock);
1065
	base = alloc_pages(GFP_KERNEL, 0);
1066
	if (!debug_pagealloc_enabled())
1067 1068 1069 1070
		spin_lock(&cpa_lock);
	if (!base)
		return -ENOMEM;

1071
	if (__split_large_page(cpa, kpte, address, base))
S
Suresh Siddha 已提交
1072
		__free_page(base);
1073 1074 1075 1076

	return 0;
}

static bool try_to_free_pte_page(pte_t *pte)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		if (!pte_none(pte[i]))
			return false;

	free_page((unsigned long)pte);
	return true;
}

static bool try_to_free_pmd_page(pmd_t *pmd)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_none(pmd[i]))
			return false;

	free_page((unsigned long)pmd);
	return true;
}

static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, start);

	while (start < end) {
		set_pte(pte, __pte(0));

		start += PAGE_SIZE;
		pte++;
	}

	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
		pmd_clear(pmd);
		return true;
	}
	return false;
}

static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
			      unsigned long start, unsigned long end)
{
	if (unmap_pte_range(pmd, start, end))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, start);

	/*
	 * Not on a 2MB page boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		__unmap_pmd_range(pud, pmd, start, pre_end);

		start = pre_end;
		pmd++;
	}

	/*
	 * Try to unmap in 2M chunks.
	 */
	while (end - start >= PMD_SIZE) {
		if (pmd_large(*pmd))
			pmd_clear(pmd);
		else
			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

		start += PMD_SIZE;
		pmd++;
	}

	/*
	 * 4K leftovers?
	 */
	if (start < end)
		return __unmap_pmd_range(pud, pmd, start, end);

	/*
	 * Try again to free the PMD page if haven't succeeded above.
	 */
	if (!pud_none(*pud))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}
1170

1171
static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
1172
{
1173
	pud_t *pud = pud_offset(p4d, start);

	/*
	 * Not on a GB page boundary?
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
		unsigned long pre_end	= min_t(unsigned long, end, next_page);

		unmap_pmd_range(pud, start, pre_end);

		start = pre_end;
		pud++;
	}

	/*
	 * Try to unmap in 1G chunks?
	 */
	while (end - start >= PUD_SIZE) {

		if (pud_large(*pud))
			pud_clear(pud);
		else
			unmap_pmd_range(pud, start, start + PUD_SIZE);

		start += PUD_SIZE;
		pud++;
	}

	/*
	 * 2M leftovers?
	 */
	if (start < end)
		unmap_pmd_range(pud, start, end);

	/*
	 * No need to try to free the PUD page because we'll free it in
	 * populate_pgd's error path
	 */
}

1214 1215
static int alloc_pte_page(pmd_t *pmd)
{
1216
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
1217 1218 1219 1220 1221 1222 1223
	if (!pte)
		return -1;

	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	return 0;
}

1224 1225
static int alloc_pmd_page(pud_t *pud)
{
1226
	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
1227 1228 1229 1230 1231 1232 1233
	if (!pmd)
		return -1;

	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	return 0;
}

1234 1235 1236 1237 1238 1239 1240 1241
static void populate_pte(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, start);

1242
	pgprot = pgprot_clear_protnone_bits(pgprot);
1243 1244

	while (num_pages-- && start < end) {
1245
		set_pte(pte, pfn_pte(cpa->pfn, pgprot));
1246 1247

		start	 += PAGE_SIZE;
1248
		cpa->pfn++;
1249 1250 1251
		pte++;
	}
}
1252

1253 1254 1255
static long populate_pmd(struct cpa_data *cpa,
			 unsigned long start, unsigned long end,
			 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
1256
{
1257
	long cur_pages = 0;
1258
	pmd_t *pmd;
1259
	pgprot_t pmd_pgprot;

	/*
	 * Not on a 2M boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;

		pre_end   = min_t(unsigned long, pre_end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(unsigned int, num_pages, cur_pages);

		/*
		 * Need a PTE page?
		 */
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);

		start = pre_end;
	}

	/*
	 * We mapped them all?
	 */
	if (num_pages == cur_pages)
		return cur_pages;

1291 1292
	pmd_pgprot = pgprot_4k_2_large(pgprot);

	while (end - start >= PMD_SIZE) {

		/*
		 * We cannot use a 1G page so allocate a PMD page if needed.
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		pmd = pmd_offset(pud, start);

1304 1305
		set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
					canon_pgprot(pmd_pgprot))));
1306 1307

		start	  += PMD_SIZE;
1308
		cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
		cur_pages += PMD_SIZE >> PAGE_SHIFT;
	}

	/*
	 * Map trailing 4K pages.
	 */
	if (start < end) {
		pmd = pmd_offset(pud, start);
		if (pmd_none(*pmd))
			if (alloc_pte_page(pmd))
				return -1;

		populate_pte(cpa, start, end, num_pages - cur_pages,
			     pmd, pgprot);
	}
	return num_pages;
}
1326

1327 1328
static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
			pgprot_t pgprot)
1329 1330 1331
{
	pud_t *pud;
	unsigned long end;
1332
	long cur_pages = 0;
1333
	pgprot_t pud_pgprot;

	end = start + (cpa->numpages << PAGE_SHIFT);

	/*
	 * Not on a Gb page boundary? => map everything up to it with
	 * smaller pages.
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long pre_end;
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;

		pre_end   = min_t(unsigned long, end, next_page);
		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		cur_pages = min_t(int, (int)cpa->numpages, cur_pages);

1349
		pud = pud_offset(p4d, start);

		/*
		 * Need a PMD page?
		 */
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
					 pud, pgprot);
		if (cur_pages < 0)
			return cur_pages;

		start = pre_end;
	}

	/* We mapped them all? */
	if (cpa->numpages == cur_pages)
		return cur_pages;

1370
	pud = pud_offset(p4d, start);
1371
	pud_pgprot = pgprot_4k_2_large(pgprot);
1372 1373 1374 1375

	/*
	 * Map everything starting from the Gb boundary, possibly with 1G pages
	 */
1376
	while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
1377 1378
		set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
				   canon_pgprot(pud_pgprot))));
1379 1380

		start	  += PUD_SIZE;
1381
		cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
1382 1383 1384 1385 1386 1387
		cur_pages += PUD_SIZE >> PAGE_SHIFT;
		pud++;
	}

	/* Map trailing leftover */
	if (start < end) {
1388
		long tmp;
1389

1390
		pud = pud_offset(p4d, start);
		if (pud_none(*pud))
			if (alloc_pmd_page(pud))
				return -1;

		tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
				   pud, pgprot);
		if (tmp < 0)
			return cur_pages;

		cur_pages += tmp;
	}
	return cur_pages;
}

/*
 * Restrictions for kernel page table do not necessarily apply when mapping in
 * an alternate PGD.
 */
static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
{
	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
	pud_t *pud = NULL;	/* shut up gcc */
1413
	p4d_t *p4d;
1414
	pgd_t *pgd_entry;
1415
	long ret;
1416 1417 1418

	pgd_entry = cpa->pgd + pgd_index(addr);

1419
	if (pgd_none(*pgd_entry)) {
1420
		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
1421 1422 1423 1424 1425 1426
		if (!p4d)
			return -1;

		set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
	}

1427 1428 1429
	/*
	 * Allocate a PUD page and hand it down for mapping.
	 */
1430 1431
	p4d = p4d_offset(pgd_entry, addr);
	if (p4d_none(*p4d)) {
1432
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
1433 1434
		if (!pud)
			return -1;
1435

1436
		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
1437 1438 1439 1440 1441
	}

	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(pgprot) |=  pgprot_val(cpa->mask_set);

1442
	ret = populate_pud(cpa, addr, p4d, pgprot);
1443
	if (ret < 0) {
1444 1445 1446 1447 1448
		/*
		 * Leave the PUD page in place in case some other CPU or thread
		 * already found it, but remove any useless entries we just
		 * added to it.
		 */
1449
		unmap_pud_range(p4d, addr,
1450
				addr + (cpa->numpages << PAGE_SHIFT));
1451
		return ret;
1452
	}
1453

1454 1455 1456 1457
	cpa->numpages = ret;
	return 0;
}

1458 1459 1460
static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
			       int primary)
{
1461 1462 1463 1464 1465 1466
	if (cpa->pgd) {
		/*
		 * Right now, we only execute this code path when mapping
		 * the EFI virtual memory map regions, no other users
		 * provide a ->pgd value. This may change in the future.
		 */
1467
		return populate_pgd(cpa, vaddr);
1468
	}
1469

1470 1471 1472
	/*
	 * Ignore all non primary paths.
	 */
1473 1474
	if (!primary) {
		cpa->numpages = 1;
1475
		return 0;
1476
	}

	/*
	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
	 * to have holes.
	 * Also set numpages to '1' indicating that we processed cpa req for
	 * one virtual address page and its pfn. TBD: numpages can be set based
	 * on the initial value and the level returned by lookup_address().
	 */
	if (within(vaddr, PAGE_OFFSET,
		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
		cpa->numpages = 1;
		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
		return 0;
1490 1491 1492 1493

	} else if (__cpa_pfn_in_highmap(cpa->pfn)) {
		/* Faults in the highmap are OK, so do not warn: */
		return -EFAULT;
	} else {
		WARN(1, KERN_WARNING "CPA: called for zero pte. "
			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
			*cpa->vaddr);

		return -EFAULT;
	}
}

T
Thomas Gleixner 已提交
1503
static int __change_page_attr(struct cpa_data *cpa, int primary)
1504
{
1505
	unsigned long address;
1506 1507
	int do_split, err;
	unsigned int level;
T
Thomas Gleixner 已提交
1508
	pte_t *kpte, old_pte;
L
Linus Torvalds 已提交
1509

1510
	address = __cpa_addr(cpa, cpa->curpage);
1511
repeat:
1512
	kpte = _lookup_address_cpa(cpa, address, &level);
L
Linus Torvalds 已提交
1513
	if (!kpte)
1514
		return __cpa_process_fault(cpa, address, primary);
T
Thomas Gleixner 已提交
1515 1516

	old_pte = *kpte;
1517
	if (pte_none(old_pte))
1518
		return __cpa_process_fault(cpa, address, primary);
1519

T
Thomas Gleixner 已提交
1520
	if (level == PG_LEVEL_4K) {
T
Thomas Gleixner 已提交
1521
		pte_t new_pte;
1522
		pgprot_t new_prot = pte_pgprot(old_pte);
T
Thomas Gleixner 已提交
1523
		unsigned long pfn = pte_pfn(old_pte);
I
Ingo Molnar 已提交
1524

T
Thomas Gleixner 已提交
1525 1526
		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
I
Ingo Molnar 已提交
1527

1528
		cpa_inc_4k_install();
1529 1530
		/* Hand in lpsize = 0 to enforce the protection mechanism */
		new_prot = static_protections(new_prot, address, pfn, 1, 0,
1531
					      CPA_PROTECT);
I
Ingo Molnar 已提交
1532

1533
		new_prot = pgprot_clear_protnone_bits(new_prot);
1534

1535 1536 1537 1538 1539
		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
1540
		new_pte = pfn_pte(pfn, new_prot);
T
Thomas Gleixner 已提交
1541
		cpa->pfn = pfn;
1542 1543 1544 1545 1546
		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
1547
			cpa->flags |= CPA_FLUSHTLB;
1548
		}
1549
		cpa->numpages = 1;
1550
		return 0;
L
Linus Torvalds 已提交
1551
	}
1552 1553 1554 1555 1556

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
1557
	do_split = should_split_large_page(kpte, address, cpa);
1558 1559
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and cpa->tlbflush have been updated in
	 * try_large_page:
	 */
I
Ingo Molnar 已提交
1563 1564
	if (do_split <= 0)
		return do_split;
1565 1566 1567 1568

	/*
	 * We have to split the large page:
	 */
1569
	err = split_large_page(cpa, kpte, address);
1570
	if (!err)
I
Ingo Molnar 已提交
1571
		goto repeat;
I
Ingo Molnar 已提交
1572

I
Ingo Molnar 已提交
1573
	return err;
1574
}
L
Linus Torvalds 已提交
1575

T
Thomas Gleixner 已提交
1576 1577 1578
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
L
Linus Torvalds 已提交
1579
{
T
Thomas Gleixner 已提交
1580
	struct cpa_data alias_cpa;
T
Tejun Heo 已提交
1581
	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
1582
	unsigned long vaddr;
T
Tejun Heo 已提交
1583
	int ret;
1584

1585
	if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
T
Thomas Gleixner 已提交
1586
		return 0;
1587

1588 1589 1590 1591
	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
1592
	vaddr = __cpa_addr(cpa, cpa->curpage);
1593
	if (!(within(vaddr, PAGE_OFFSET,
1594
		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
1595

1596
		alias_cpa = *cpa;
T
Tejun Heo 已提交
1597
		alias_cpa.vaddr = &laddr;
1598
		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
1599
		alias_cpa.curpage = 0;
1600

1601
		ret = __change_page_attr_set_clr(&alias_cpa, 0);
T
Tejun Heo 已提交
1602 1603
		if (ret)
			return ret;
1604
	}
1605 1606

#ifdef CONFIG_X86_64
A
Arjan van de Ven 已提交
1607
	/*
T
Tejun Heo 已提交
1608 1609
	 * If the primary call didn't touch the high mapping already
	 * and the physical address is inside the kernel map, we need
1610
	 * to touch the high mapped kernel as well:
A
Arjan van de Ven 已提交
1611
	 */
T
Tejun Heo 已提交
1612
	if (!within(vaddr, (unsigned long)_text, _brk_end) &&
1613
	    __cpa_pfn_in_highmap(cpa->pfn)) {
T
Tejun Heo 已提交
1614 1615 1616 1617 1618
		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
					       __START_KERNEL_map - phys_base;
		alias_cpa = *cpa;
		alias_cpa.vaddr = &temp_cpa_vaddr;
		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
1619
		alias_cpa.curpage = 0;
T
Thomas Gleixner 已提交
1620

T
Tejun Heo 已提交
1621 1622 1623 1624 1625 1626
		/*
		 * The high mapping range is imprecise, so ignore the
		 * return value.
		 */
		__change_page_attr_set_clr(&alias_cpa, 0);
	}
A
Arjan van de Ven 已提交
1627
#endif
T
Tejun Heo 已提交
1628 1629

	return 0;
L
Linus Torvalds 已提交
1630 1631
}

T
Thomas Gleixner 已提交
1632
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
1633
{
1634
	unsigned long numpages = cpa->numpages;
1635 1636
	unsigned long rempages = numpages;
	int ret = 0;
1637

1638
	while (rempages) {
1639 1640 1641 1642
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
1643
		cpa->numpages = rempages;
1644
		/* for array changes, we can't use large page */
1645
		if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
1646
			cpa->numpages = 1;
T
Thomas Gleixner 已提交
1647

1648
		if (!debug_pagealloc_enabled())
1649
			spin_lock(&cpa_lock);
T
Thomas Gleixner 已提交
1650
		ret = __change_page_attr(cpa, checkalias);
1651
		if (!debug_pagealloc_enabled())
1652
			spin_unlock(&cpa_lock);
1653
		if (ret)
1654
			goto out;
1655

T
Thomas Gleixner 已提交
1656 1657 1658
		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
1659
				goto out;
T
Thomas Gleixner 已提交
1660 1661
		}

1662 1663 1664 1665 1666
		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
1667 1668
		BUG_ON(cpa->numpages > rempages || !cpa->numpages);
		rempages -= cpa->numpages;
1669
		cpa->curpage += cpa->numpages;
1670
	}
1671 1672 1673 1674 1675

out:
	/* Restore the original numpages */
	cpa->numpages = numpages;
	return ret;
1676 1677
}

1678
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
1679
				    pgprot_t mask_set, pgprot_t mask_clr,
1680 1681
				    int force_split, int in_flag,
				    struct page **pages)
1682
{
T
Thomas Gleixner 已提交
1683
	struct cpa_data cpa;
1684
	int ret, cache, checkalias;
1685

1686 1687
	memset(&cpa, 0, sizeof(cpa));

1688
	/*
	 * Check whether we are requested to set an unsupported
	 * feature. Clearing unsupported features is OK.
1691 1692
	 */
	mask_set = canon_pgprot(mask_set);
1693

1694
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
1695 1696
		return 0;

1697
	/* Ensure we are PAGE_SIZE aligned */
1698
	if (in_flag & CPA_ARRAY) {
1699 1700 1701 1702 1703 1704 1705
		int i;
		for (i = 0; i < numpages; i++) {
			if (addr[i] & ~PAGE_MASK) {
				addr[i] &= PAGE_MASK;
				WARN_ON_ONCE(1);
			}
		}
1706 1707 1708
	} else if (!(in_flag & CPA_PAGES_ARRAY)) {
		/*
		 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
I
Ingo Molnar 已提交
1709
		 * No need to check in that case
1710 1711 1712 1713 1714 1715 1716 1717
		 */
		if (*addr & ~PAGE_MASK) {
			*addr &= PAGE_MASK;
			/*
			 * People should not be passing in unaligned addresses:
			 */
			WARN_ON_ONCE(1);
		}
1718 1719
	}

1720 1721 1722
	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();

N
Nick Piggin 已提交
1723 1724
	vm_unmap_aliases();

T
Thomas Gleixner 已提交
1725
	cpa.vaddr = addr;
1726
	cpa.pages = pages;
T
Thomas Gleixner 已提交
1727 1728 1729
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
1730 1731
	cpa.flags = 0;
	cpa.curpage = 0;
1732
	cpa.force_split = force_split;
T
Thomas Gleixner 已提交
1733

1734 1735
	if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
		cpa.flags |= in_flag;
1736

1737 1738
	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
1739 1740 1741
	/* Has caller explicitly disabled alias checking? */
	if (in_flag & CPA_NO_CHECK_ALIAS)
		checkalias = 0;
1742 1743

	ret = __change_page_attr_set_clr(&cpa, checkalias);
1744

1745 1746 1747
	/*
	 * Check whether we really changed something:
	 */
1748
	if (!(cpa.flags & CPA_FLUSHTLB))
1749
		goto out;
1750

1751 1752 1753 1754
	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
1755
	cache = !!pgprot2cachemode(mask_set);
1756

1757
	/*
1758
	 * On error; flush everything to be sure.
1759
	 */
1760
	if (ret) {
1761
		cpa_flush_all(cache);
1762 1763 1764
		goto out;
	}

1765
	cpa_flush(&cpa, cache);
1766
out:
1767 1768 1769
	return ret;
}

1770 1771
static inline int change_page_attr_set(unsigned long *addr, int numpages,
				       pgprot_t mask, int array)
1772
{
1773
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
1774
		(array ? CPA_ARRAY : 0), NULL);
1775 1776
}

1777 1778
static inline int change_page_attr_clear(unsigned long *addr, int numpages,
					 pgprot_t mask, int array)
1779
{
1780
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
1781
		(array ? CPA_ARRAY : 0), NULL);
1782 1783
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
		CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
		CPA_PAGES_ARRAY, pages);
}

1798
int _set_memory_uc(unsigned long addr, int numpages)
1799
{
1800
	/*
C
Christoph Hellwig 已提交
1801
	 * for now UC MINUS. see comments in ioremap()
1802 1803 1804
	 * If you really need strong UC use ioremap_uc(), but note
	 * that you cannot override IO areas with set_memory_*() as
	 * these helpers cannot work with IO memory.
1805
	 */
1806
	return change_page_attr_set(&addr, numpages,
1807 1808
				    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
				    0);
1809
}
1810 1811 1812

int set_memory_uc(unsigned long addr, int numpages)
{
1813 1814
	int ret;

1815
	/*
C
Christoph Hellwig 已提交
1816
	 * for now UC MINUS. see comments in ioremap()
1817
	 */
1818
	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1819
			      _PAGE_CACHE_MODE_UC_MINUS, NULL);
	if (ret)
		goto out_err;

	ret = _set_memory_uc(addr, numpages);
	if (ret)
		goto out_free;

	return 0;
1828

1829
out_free:
1830
	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1831 1832
out_err:
	return ret;
1833
}
EXPORT_SYMBOL(set_memory_uc);
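
/*
 * Added usage sketch (not part of the upstream file; "addr" and "numpages"
 * are hypothetical and must refer to RAM in the direct mapping, not to I/O
 * ranges): set_memory_uc() reserves the memtype and switches the range to
 * UC-, set_memory_wb() undoes both.
 *
 *	ret = set_memory_uc(addr, numpages);
 *	if (!ret) {
 *		... use the range uncached ...
 *		set_memory_wb(addr, numpages);
 *	}
 */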

int _set_memory_wc(unsigned long addr, int numpages)
{
1838
	int ret;
1839

1840
	ret = change_page_attr_set(&addr, numpages,
1841 1842
				   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
				   0);
1843
	if (!ret) {
1844 1845
		ret = change_page_attr_set_clr(&addr, numpages,
					       cachemode2pgprot(_PAGE_CACHE_MODE_WC),
1846 1847
					       __pgprot(_PAGE_CACHE_MASK),
					       0, 0, NULL);
1848 1849
	}
	return ret;
1850 1851 1852 1853
}

int set_memory_wc(unsigned long addr, int numpages)
{
1854 1855
	int ret;

1856
	ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1857
		_PAGE_CACHE_MODE_WC, NULL);
1858
	if (ret)
1859
		return ret;
1860

1861 1862
	ret = _set_memory_wc(addr, numpages);
	if (ret)
1863
		memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1864 1865

	return ret;
1866 1867 1868
}
EXPORT_SYMBOL(set_memory_wc);

int _set_memory_wt(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages,
				    cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
}

int _set_memory_wb(unsigned long addr, int numpages)
{
	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	return change_page_attr_clear(&addr, numpages,
				      __pgprot(_PAGE_CACHE_MASK), 0);
}

int set_memory_wb(unsigned long addr, int numpages)
{
	int ret;

	ret = _set_memory_wb(addr, numpages);
	if (ret)
		return ret;

	memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
	return 0;
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	if (!(__supported_pte_mask & _PAGE_NX))
		return 0;

	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
}

int set_memory_nx(unsigned long addr, int numpages)
{
	if (!(__supported_pte_mask & _PAGE_NX))
		return 0;

	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
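/*
 * Illustrative sketch (not from this file): set_memory_ro()/set_memory_rw()
 * are commonly used to write-protect a mostly-read-only structure after it
 * has been initialized, briefly lifting the protection for updates.  "table",
 * "table_pages" and update_table() are assumptions made up for the example.
 *
 *	set_memory_ro((unsigned long)table, table_pages);
 *	...
 *	set_memory_rw((unsigned long)table, table_pages);
 *	update_table(table);
 *	set_memory_ro((unsigned long)table, table_pages);
 */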

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}
int set_memory_np_noalias(unsigned long addr, int numpages)
{
	int cpa_flags = CPA_NO_CHECK_ALIAS;

	return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
					__pgprot(_PAGE_PRESENT), 0,
					cpa_flags, NULL);
}

int set_memory_4k(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
					__pgprot(0), 1, 0, NULL);
}

int set_memory_nonglobal(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages,
				      __pgprot(_PAGE_GLOBAL), 0);
}

int set_memory_global(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages,
				    __pgprot(_PAGE_GLOBAL), 0);
}

static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
	struct cpa_data cpa;
	int ret;

	/* Nothing to do if memory encryption is not active */
	if (!mem_encrypt_active())
		return 0;

	/* Should not be working on unaligned addresses */
	if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
		addr &= PAGE_MASK;

	memset(&cpa, 0, sizeof(cpa));
	cpa.vaddr = &addr;
	cpa.numpages = numpages;
	cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
	cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
	cpa.pgd = init_mm.pgd;

	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();
	vm_unmap_aliases();

	/*
	 * Before changing the encryption attribute, we need to flush caches.
	 */
	cpa_flush(&cpa, 1);

	ret = __change_page_attr_set_clr(&cpa, 1);

	/*
	 * After changing the encryption attribute, we need to flush TLBs again
	 * in case any speculative TLB caching occurred (but no need to flush
	 * caches again).  We could just use cpa_flush_all(), but in case TLB
	 * flushing gets optimized in the cpa_flush() path use the same logic
	 * as above.
	 */
	cpa_flush(&cpa, 0);

	return ret;
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
	return __set_memory_enc_dec(addr, numpages, true);
}
EXPORT_SYMBOL_GPL(set_memory_encrypted);

int set_memory_decrypted(unsigned long addr, int numpages)
{
	return __set_memory_enc_dec(addr, numpages, false);
}
EXPORT_SYMBOL_GPL(set_memory_decrypted);
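/*
 * Illustrative sketch (not from this file): under SEV/SME a buffer that must
 * be shared with the hypervisor or an untrusted device is switched to a
 * decrypted (shared) mapping and switched back before it is freed.  The
 * allocation below is made up for the example.
 *
 *	unsigned long shared = __get_free_pages(GFP_KERNEL, 0);
 *
 *	if (shared && !set_memory_decrypted(shared, 1)) {
 *		... exchange data through the shared page ...
 *		set_memory_encrypted(shared, 1);
 *	}
 *	free_pages(shared, 0);
 */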

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

static int _set_pages_array(struct page **pages, int numpages,
		enum page_cache_mode new_type)
{
	unsigned long start;
	unsigned long end;
	enum page_cache_mode set_type;
	int i;
	int free_idx;
	int ret;

	for (i = 0; i < numpages; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		if (memtype_reserve(start, end, new_type, NULL))
			goto err_out;
	}

	/* If WC, set to UC- first and then WC */
	set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
				_PAGE_CACHE_MODE_UC_MINUS : new_type;

	ret = cpa_set_pages_array(pages, numpages,
				  cachemode2pgprot(set_type));
	if (!ret && new_type == _PAGE_CACHE_MODE_WC)
		ret = change_page_attr_set_clr(NULL, numpages,
					       cachemode2pgprot(
						_PAGE_CACHE_MODE_WC),
					       __pgprot(_PAGE_CACHE_MASK),
					       0, CPA_PAGES_ARRAY, pages);
	if (ret)
		goto err_out;
	return 0; /* Success */
err_out:
	free_idx = i;
	for (i = 0; i < free_idx; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		memtype_free(start, end);
	}
	return -EINVAL;
}

int set_pages_array_uc(struct page **pages, int numpages)
{
	return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS);
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int numpages)
{
	return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC);
}
EXPORT_SYMBOL(set_pages_array_wc);

int set_pages_array_wt(struct page **pages, int numpages)
{
	return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WT);
}
EXPORT_SYMBOL_GPL(set_pages_array_wt);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_array_wb(struct page **pages, int numpages)
{
	int retval;
	unsigned long start;
	unsigned long end;
	int i;

	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	retval = cpa_clear_pages_array(pages, numpages,
			__pgprot(_PAGE_CACHE_MASK));
	if (retval)
		return retval;

	for (i = 0; i < numpages; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		memtype_free(start, end);
	}

	return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);
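/*
 * Illustrative sketch (not from this file): the array-based helpers are meant
 * for callers (typically graphics drivers) that already hold an array of
 * struct page pointers.  A WC mapping taken with set_pages_array_wc() is
 * undone with set_pages_array_wb() so the per-page memtype reservations are
 * released again.  "pages" and "npages" are assumptions for the example.
 *
 *	if (!set_pages_array_wc(pages, npages)) {
 *		... map and use the pages as write-combining ...
 *		set_pages_array_wb(pages, npages);
 *	}
 */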

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.pgd = NULL,
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0),
				.flags = 0};

	/*
	 * No alias checking needed for setting the present flag. Otherwise,
	 * we may need to break large pages for 64-bit kernel text
	 * mappings (this adds to complexity if we want to do this from
	 * atomic context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.pgd = NULL,
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.flags = 0};

	/*
	 * No alias checking needed for setting the not-present flag. Otherwise,
	 * we may need to break large pages for 64-bit kernel text
	 * mappings (this adds to complexity if we want to do this from
	 * atomic context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

int set_direct_map_invalid_noflush(struct page *page)
{
	return __set_pages_np(page, 1);
}

int set_direct_map_default_noflush(struct page *page)
{
	return __set_pages_p(page, 1);
}
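/*
 * Illustrative sketch (not from this file): the *_noflush helpers leave TLB
 * flushing to the caller, which can batch it over a whole range.  The single
 * page and range below are assumptions for the example.
 *
 *	set_direct_map_invalid_noflush(page);
 *	...
 *	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 */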

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages for identity mappings are not used at boot time
	 * and hence no memory allocations during large page split.
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock -> flush only the current CPU.
	 * Preemption needs to be disabled around __flush_tlb_all() due to
	 * CR3 reload in __native_flush_tlb().
	 */
	preempt_disable();
	__flush_tlb_all();
	preempt_enable();

	arch_flush_lazy_mmu_mode();
}

#ifdef CONFIG_HIBERNATION
bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}
#endif /* CONFIG_HIBERNATION */

int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
				   unsigned numpages, unsigned long page_flags)
{
	int retval = -EINVAL;

	struct cpa_data cpa = {
		.vaddr = &address,
		.pfn = pfn,
		.pgd = pgd,
		.numpages = numpages,
		.mask_set = __pgprot(0),
		.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
		.flags = 0,
	};

	WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");

	if (!(__supported_pte_mask & _PAGE_NX))
		goto out;

	if (!(page_flags & _PAGE_ENC))
		cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);

	cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);

	retval = __change_page_attr_set_clr(&cpa, 0);
	__flush_tlb_all();

out:
	return retval;
}

/*
 * __flush_tlb_all() flushes mappings only on current CPU and hence this
 * function shouldn't be used in an SMP environment. Presently, it's used only
 * during boot (way before smp_init()) by EFI subsystem and hence is ok.
 */
int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
				     unsigned long numpages)
{
	int retval;

	/*
	 * The typical sequence for unmapping is to find a pte through
	 * lookup_address_in_pgd() (ideally, it should never return NULL because
	 * the address is already mapped) and change its protections. As pfn is
	 * the *target* of a mapping, it's not useful while unmapping.
	 */
	struct cpa_data cpa = {
		.vaddr		= &address,
		.pfn		= 0,
		.pgd		= pgd,
		.numpages	= numpages,
		.mask_set	= __pgprot(0),
		.mask_clr	= __pgprot(_PAGE_PRESENT | _PAGE_RW),
		.flags		= 0,
	};

	WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");

	retval = __change_page_attr_set_clr(&cpa, 0);
	__flush_tlb_all();

	return retval;
}

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "cpa-test.c"
#endif