// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/debugfs.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will have
 * handled those for us; here we additionally deal with missing execute
 * permission on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

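/*
 * Test and clear the accessed (young) bit of a huge pmd, returning
 * whether the bit was set.
 */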
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd entry. We should not be called to update an
 * existing pmd entry; that should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_large(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

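/*
 * Empty IPI handler: the IPI delivery itself is the point; it guarantees
 * the target CPU is no longer inside an interrupts-off lock-less page
 * table walk.
 */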
static void do_nothing(void *unused)
{

}
/*
 * Serialize against find_current_mm_pte, which does a lock-less
 * lookup in the page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t differs from that of
 * pmd_t, we want to prevent a transition from a pmd pointing to a page table
 * to a pmd pointing to a huge page (and back) while interrupts are disabled.
 * We clear the pmd so that it can later be replaced with a page table pointer
 * in different code paths, so make sure we wait for any parallel
 * find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return __pmd(old_pmd);
}

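/*
 * Helpers for constructing huge pmd entries: pmd_set_protbits() ORs the
 * protection bits into a pmd, while pfn_pmd()/mk_pmd() build a pmd from
 * a pfn or struct page plus protection bits.
 */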
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

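/*
 * Change the protection of a huge pmd entry, preserving the PFN and the
 * bits covered by _HPAGE_CHG_MASK.
 */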
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	if (radix_enabled())
		prefetch((void *)addr);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
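/*
 * Create/remove the kernel linear mapping for a hot-plugged memory
 * section, dispatching to the radix or hash implementation.
 */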
int __meminit create_section_mapping(unsigned long start, unsigned long end,
				     int nid, pgprot_t prot)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid, prot);

	return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

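/*
 * Allocate the (naturally aligned) partition table and advertise it to
 * the hardware and the nest MMU via the partition table control register
 * (PTCR).
 */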
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	/* Initialize the Partition Table with no entries */
	partition_tb = memblock_alloc(patb_size, patb_size);
	if (!partition_tb)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, patb_size, patb_size);

	/*
	 * Update the partition table control register: physical base of
	 * the table plus its size (1 << PATB_SIZE_SHIFT, i.e. 64K here)
	 * encoded as log2(size) - 12.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	set_ptcr_when_no_uv(ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

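/*
 * Flush all translations cached for @lpid, using the radix or hash flush
 * primitives depending on how the partition was previously used.
 */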
static void flush_partition(unsigned int lpid, bool radix)
{
	if (radix) {
		radix__flush_all_lpid(lpid);
		radix__flush_all_lpid_guest(lpid);
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		/* do we need fixup here? */
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
}

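/*
 * Install both doublewords of the partition table entry (PATE) for @lpid
 * and, when requested, flush the translations cached for the previous
 * entry.
 */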
void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				  unsigned long dw1, bool flush)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	/*
	 * When ultravisor is enabled, the partition table is stored in secure
	 * memory and can only be accessed via an ultravisor call. However, we
	 * maintain a copy of the partition table in normal memory to allow Nest
	 * MMU translations to occur (for normal VMs).
	 *
	 * Therefore, here we always update partition_tb, regardless of whether
	 * we are running under an ultravisor or not.
	 */
	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * If ultravisor is enabled, we do an ultravisor call to register the
	 * partition table entry (PATE), which also does a global flush of TLBs
	 * and partition table caches for the lpid. Otherwise, just do the
	 * flush. The type of flush (hash or radix) depends on what the previous
	 * use of the partition ID was, not the new use.
	 */
	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
		uv_register_pate(lpid, dw0, dw1);
		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
			dw0, dw1);
	} else if (flush) {
		/*
		 * Boot does not need to flush, because MMU is off and each
		 * CPU does a tlbiel_all() before its MMU is switched on, which
		 * flushes everything.
		 */
		flush_partition(lpid, (old & PATB_HR));
	}
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

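/*
 * PMD fragments: a page is carved into PMD_FRAG_NR fragments of
 * PMD_FRAG_SIZE bytes each. Return the next unused fragment cached in
 * mm->context.pmd_frag, if any.
 */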
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have used up all the fragments, mark the pmd
		 * fragment pointer NULL.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

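/*
 * Allocate a fresh page for PMD fragments; if more than one fragment
 * fits in a page, cache the remainder in mm->context.pmd_frag for later
 * allocations.
 */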
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If we find mm->context.pmd_frag already set (someone else raced
	 * with us and installed a fragment page), return the allocated
	 * page with a single fragment count; otherwise publish the
	 * remaining fragments for reuse.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

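/*
 * Drop one fragment reference on the backing page and free the page once
 * the last fragment reference is gone.
 */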
void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

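/*
 * Free a page-table page of the given kind; @index identifies which
 * level (or hugepd cache) the table came from.
 */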
static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
		/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
		/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
		/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

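/*
 * Defer freeing of a page table until after the TLB flush: the table
 * kind is stashed in the low bits of the pointer and decoded again in
 * __tlb_remove_table().
 */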
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps memory with a single size, mmu_linear_psize,
	 * so don't bother printing these for hash.
	 */
	if (!radix_enabled())
		return;
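	/*
	 * direct_pages_count[] counts linear-mapping pages of each size;
	 * the shifts convert a page count into kB (e.g. 2M pages: << 11).
	 */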
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

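/*
 * Start/commit pair for updating the protection of a pte: "start" makes
 * the entry invalid to hardware (while keeping pte_present() true),
 * "commit" installs the new value.
 */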
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear _PAGE_PRESENT so that no parallel hardware update is
	 * possible. Also keep pte_present() true (via _PAGE_INVALID) so
	 * that we don't take a spurious fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information, which is kept at a PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we always use the deposited table, irrespective of whether the
 * mapping is anonymous. With radix we use the deposited table only for
 * anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = true;
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = true;

static int __init setup_disable_tlbie(char *str)
{
	if (!radix_enabled()) {
		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
		return 1;
	}

	tlbie_capable = false;
	tlbie_enabled = false;

	return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
	if (!tlbie_capable)
		return 0;

	/*
	 * There is no locking vs tlb flushing when changing this value.
	 * The tlb flushers will see one value or another, and use either
	 * tlbie or tlbiel with IPIs. In both cases the TLBs will be
	 * invalidated as expected.
	 */
	debugfs_create_bool("tlbie_enabled", 0600,
			powerpc_debugfs_root,
			&tlbie_enabled);

	return 0;
}
arch_initcall(pgtable_debugfs_setup);