tlb_64.c
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * include/asm-powerpc/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

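/* One page worth of deferred page-table frees: a small header followed
 * by up to PTE_FREELIST_SIZE pgtable_free_t entries in the zero-length
 * tables[] array.  The whole page is handed to RCU and freed from the
 * callback once it is safe to do so.
 */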
struct pte_freelist_batch
{
	struct rcu_head	rcu;
	unsigned int	index;
	pgtable_free_t	tables[0];
};

DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;

#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))
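/* Illustrative sizing only (layout depends on the configuration): with
 * 4K pages, an 8-byte pgtable_free_t and a ~24-byte batch header,
 * PTE_FREELIST_SIZE comes to roughly 500 entries per batch page.
 */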

#ifdef CONFIG_SMP
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}
#endif

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pgtable_free(pgf);
}

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}

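/* Queue a page-table page freed by the mmu_gather for deferred freeing.
 * If no other CPU can be using the mm, the page is freed immediately;
 * otherwise it is added to the per-CPU batch, which is handed to RCU so
 * the page is not reused while another CPU may still be walking it.
 */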
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
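/* Rough usage sketch (illustrative, not a requirement imposed by this
 * file): the per-CPU batch is normally marked active by
 * arch_enter_lazy_mmu_mode() and drained by arch_leave_lazy_mmu_mode(),
 * so a caller typically does
 *
 *	local_irq_save(flags);
 *	arch_enter_lazy_mmu_mode();
 *	hpte_need_flush(mm, addr, ptep, pte_val(*ptep), 0);
 *	arch_leave_lazy_mmu_mode();
 *	local_irq_restore(flags);
 *
 * as __flush_hash_table_range() below does.
 */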
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = mmu_huge_psize;
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		vsid = get_vsid(mm->context.id, addr);
		WARN_ON(vsid == 0);
	} else
		vsid = get_kernel_vsid(addr);
	vaddr = (vsid << 28) | (addr & 0x0fffffff);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (e.g. copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid-stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	cpumask_t tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

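/* Hand the current CPU's pending page-table free batch, if any, over to
 * RCU.  The arch tlb.h code is expected to call this while preemption is
 * still disabled (see the comment below).
 */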
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end         : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */