/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-powerpc/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;

struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))
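
/*
 * The batch header and its flexible tables[] array share a single page,
 * so PTE_FREELIST_SIZE is simply "whatever room is left in the page after
 * the header".  As a rough, illustrative figure only: with a 4K PAGE_SIZE,
 * a 16-byte rcu_head plus the index (padded to about 24 bytes) and an
 * 8-byte pgtable_free_t, that comes to roughly (4096 - 24) / 8 = 509
 * entries per batch.  The exact value depends on the configuration; what
 * matters is that a full batch always fits in the one page allocated for
 * it below.
 */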

static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 1);

	pgtable_free(pgf);
}
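
/*
 * Why the empty cross-call above is believed to be enough: waiting for
 * every other CPU to execute pte_free_smp_sync() means each of them has
 * taken an interrupt since pgtable_free_now() was entered, so none can
 * still be in the middle of an interrupts-off lockless walk of the page
 * table page we are about to hand back.  This is the rare allocation
 * failure path; the common path in pgtable_free_tlb() below defers the
 * free to an RCU grace period instead.
 */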

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
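
/*
 * The decision above, in sketch form (an outline, not a spec):
 *
 *	if no other CPU can be walking this mm's page tables
 *		free the page immediately
 *	else
 *		append it to the per-cpu batch; call_rcu() will free the
 *		whole batch once a grace period has elapsed, i.e. once no
 *		CPU can still hold a stale pointer into the old table.
 */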

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment, such
	 * as for SPEs, we obtain the page size from the slice, which must
	 * therefore still exist (and thus the VMA must not have been
	 * reused) at the time of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case, might be worth testing the mm cpu mask though
	 * and decide to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}
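
/*
 * Typical calling pattern, in outline (illustrative only; the real callers
 * live in the pte_update()/lazy MMU code, and the helper at the bottom of
 * this file is another example):
 *
 *	arch_enter_lazy_mmu_mode();		(batch->active = 1)
 *	old = pte_update(mm, addr, ptep, ...);
 *	if (old & _PAGE_HASHPTE)
 *		hpte_need_flush(mm, addr, ptep, old, huge);
 *	arch_leave_lazy_mmu_mode();		(flushes anything pending)
 *
 * __flush_hash_table_range() below follows the same shape, with
 * local_irq_save() providing the non-preempt context.
 */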

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	cpumask_t tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
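
/*
 * A note on the local/global choice above (a heuristic, not a guarantee):
 * if this CPU is the only one the mm has ever been active on, the flush
 * can use a local, non-broadcast invalidate (e.g. tlbiel where supported)
 * instead of a global tlbie, avoiding invalidation traffic to every other
 * processor.
 */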

void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end         : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
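
/*
 * For instance (a hypothetical caller, shown only to illustrate intended
 * use), IO hotplug teardown code could scrub the hash entries covering a
 * bridge window that was mapped into the kernel address space with:
 *
 *	__flush_hash_table_range(&init_mm, vstart, vend);
 *
 * where vstart/vend (names invented here) bound the virtual mapping of
 * the window being removed.
 */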

#endif /* CONFIG_HOTPLUG */