/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

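/*
 * Per-cpu batch of pending hash-table invalidations, filled by
 * hpte_need_flush() and drained by __flush_tlb_pending().
 */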
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-powerpc/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

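/*
 * One page worth of pgtable_free_t entries waiting to be freed.  The
 * whole batch is handed to RCU, so the underlying page tables are only
 * released once no CPU can still be referencing them.
 */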
struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;

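/* Number of pgtable_free_t entries that fit in the batch page */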
#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))

#ifdef CONFIG_SMP
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}
#endif

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

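	/* Make sure no other CPU can still be referencing the table */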
	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pgtable_free(pgf);
}

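/* RCU callback: free each queued page table, then the batch page itself */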
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

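/* Hand the current batch to RCU; it is freed by pte_free_rcu_callback() */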
static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

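/* Free a page-table page.  If no other CPU or thread can be using the
 * mm we free it immediately, otherwise it is queued on the per-cpu RCU
 * batch and freed after a grace period.
 */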
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

/*
 * A Linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		vsid = get_vsid(mm->context.id, addr);
		WARN_ON(vsid == 0);
	} else
		vsid = get_kernel_vsid(addr);
	vaddr = (vsid << 28) | (addr & 0x0fffffff);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case, might be worth testing the mm cpu mask though
	 * and decide to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (e.g. copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
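	/* If the batch is full, flush it right away */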
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	cpumask_t tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of_cpu(smp_processor_id());
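	/* If this CPU is the only one that has used the mm, invalidate locally */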
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

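/* Submit any partially filled batch left on this CPU's freelist */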
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}