/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-powerpc/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;

#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))
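
/* Sizing sketch: each batch occupies exactly one page, so PTE_FREELIST_SIZE
 * is simply however many pgtable_free_t slots fit after the header.  As an
 * illustration (assuming a 4K PAGE_SIZE, a two-pointer rcu_head and a
 * pointer-sized pgtable_free_t, none of which is guaranteed here), that
 * works out to roughly (4096 - 24) / 8 = 509 entries per batch.
 */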

static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pgtable_free(pgf);
}

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

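/* Deferred freeing of page-table pages: pgtable_free_tlb() frees the page
 * immediately when no other CPU can be actively using this mm (a single
 * user, or a cpu_vm_mask containing only the local CPU).  Otherwise the
 * page is queued in the per-cpu pte_freelist_cur batch and handed to RCU
 * via pte_free_submit() once full, so pte_free_rcu_callback() only frees
 * it after a grace period, when any concurrent lockless walkers must have
 * finished.  If no batch page can be allocated, pgtable_free_now() falls
 * back to an IPI round-trip followed by an immediate free.
 */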
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
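 *
 * See __flush_hash_table_range() at the bottom of this file for an
 * in-tree example of a caller; per page it boils down to
 *
 *	pte = pte_val(*ptep);
 *	if (pte & _PAGE_HASHPTE)
 *		hpte_need_flush(mm, start, ptep, pte, 0);
 *
 * executed with interrupts off between arch_enter_lazy_mmu_mode() and
 * arch_leave_lazy_mmu_mode(), which is what marks the per-cpu batch
 * active so the individual flushes can be coalesced.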
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	cpumask_t tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}
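
/* Note on callers: the mmu_gather tear-down in the (more or less generic)
 * include/asm-powerpc/tlb.h is expected to flush any pending hash batch
 * with __flush_tlb_pending() and then call pte_free_finish() from its
 * tlb_flush() hook; the exact call sites are an assumption here and may
 * vary between kernel versions.
 */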

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end		: ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
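
/* Illustrative use (hypothetical hotplug path, not code from this file):
 * after a bridge's ioremap'd IO window has gone away, something along
 * the lines of
 *
 *	__flush_hash_table_range(&init_mm, io_start, io_start + io_size);
 *
 * drops the stale hash/TLB entries for that window while leaving the
 * Linux PTEs (and any overlapping 64K mappings) alone; io_start/io_size
 * are placeholder names for the bridge's IO range.
 */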

#endif /* CONFIG_HOTPLUG */