/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

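/* Per-CPU batch of pending hash table invalidations. hpte_need_flush()
 * queues entries here while a batch is active; __flush_tlb_pending()
 * flushes them all in one go.
 */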
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * arch/powerpc/include/asm/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
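	/* __real_pte() packages the Linux PTE together with any hash slot
	 * information kept alongside it, so the HPTE(s) backing this page
	 * can be located at flush time.
	 */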
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates in
	 * that case; it might be worth testing the mm cpu mask and
	 * deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
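	/* Queue the entry; once the batch fills up it is flushed right
	 * away rather than waiting for the batch to be terminated.
	 */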
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
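	/* If this mm has only ever run on the current CPU, the flush does
	 * not need to be seen by other processors and can be performed as
	 * a local invalidation.
	 */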
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end         : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
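	/* Interrupts are disabled and lazy MMU mode is entered so that the
	 * per-CPU batch becomes active and the hpte_need_flush() calls
	 * below are accumulated and flushed together.
	 */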
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}

#endif /* CONFIG_HOTPLUG */