/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

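/*
 * Hook for registering a process table of the given base, page size and
 * table size; assigned by the platform/MMU setup code.
 */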
int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
					pmd_pte(entry), address);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}

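/*
 * Test and clear the accessed (young) bit of a huge pmd.
 */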
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry; that should go via pmd_hugepage_update().
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{

}
/*
 * Serialize against find_current_mm_pte(), which does a lock-less
 * lookup in the page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t, we want to prevent the pmd from transitioning between pointing to
 * a page table and pointing to a huge page (and back) while interrupts are
 * disabled. Various code paths clear the pmd before possibly replacing it
 * with a page table pointer, so make sure we wait for any parallel
 * find_current_mm_pte() to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that relies on IRQ disabling
	 * to prevent a parallel THP split works as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}

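/* Fold the protection bits of @pgprot into the raw pmd value. */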
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

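/*
 * Build a huge-page pmd for @pfn with protection @pgprot; the pfn is
 * shifted into place and masked with PTE_RPN_MASK.
 */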
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

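/* Same as pfn_pmd(), but takes a struct page instead of a pfn. */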
pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

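/*
 * Change the protection bits of @pmd to @newprot, preserving only the
 * bits in _HPAGE_CHG_MASK.
 */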
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the Linux page
 * tables. We use it to preload an HPTE into the hash table corresponding
 * to the updated Linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
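/* Map a newly hot-added memory section, using the active MMU's helper. */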
int __meminit create_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end);

	return hash__create_section_mapping(start, end);
}

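/* Tear down the linear mapping of a hot-removed memory section. */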
int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */