/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)
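
/*
 * Without an MMU there are no TLB entries to invalidate, so the flush
 * hook above only has to consume its argument; everything else comes
 * from the generic mmu_gather implementation included below.
 */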

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

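/*
 * Size of the inline page-pointer array in struct mmu_gather; see
 * __tlb_alloc_page(), which switches to a full page of pointers when
 * one can be allocated.
 */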
#define MMU_GATHER_BUNDLE	8

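/*
 * With CONFIG_HAVE_RCU_TABLE_FREE, page-table pages are queued on an
 * mmu_table_batch and only freed after an RCU grace period, so lockless
 * page-table walkers (such as fast GUP) can never follow a pointer into
 * a table page that has already been freed.
 */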
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
#else
#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
	unsigned int		need_flush;
#endif
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		start, end;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
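
/*
 * Worked example (illustrative): zapping two 4K pages at 0x8000 and
 * 0x9000 grows the recorded range to [0x8000, 0xa000), so tlb_flush()
 * later issues a single flush_tlb_range() instead of two single-entry
 * invalidates.
 */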

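/*
 * Try to upgrade from the inline tlb->local[] array to a full page of
 * page pointers.  The GFP_NOWAIT | __GFP_NOWARN allocation may fail;
 * that is harmless, as the gather then keeps batching through the
 * eight-entry inline array and simply flushes more often.
 */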
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

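/*
 * Note the ordering: the TLB (and any RCU table batch) is flushed
 * before the gathered pages are released, so no CPU can retain a stale
 * translation to a page that has already been freed and reused.
 */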
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
}
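
/*
 * A sketch of the typical caller sequence (the callers live in core mm
 * code, e.g. unmap_region() and exit_mmap()):
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	  ... tlb_remove_tlb_entry()/tlb_remove_page() for each entry ...
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 */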

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)
/*
 * In the case of tlb vma handling, we can optimise these away when
 * we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (tlb->nr == tlb->max)
		return true;
	tlb->pages[tlb->nr++] = page;
	return false;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page)) {
		tlb_flush_mmu(tlb);
		__tlb_remove_page(tlb, page);
	}
}
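
/*
 * Example: with a page-sized pointer array, tlb->max is
 * PAGE_SIZE / sizeof(struct page *) (1024 entries for 4K pages on a
 * 32-bit build), so the 1025th gathered page forces a tlb_flush_mmu()
 * and is then queued into the emptied batch.
 */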

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
					 struct page *page)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
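	/*
	 * Illustration: for a pte page covering 0x00200000-0x003fffff,
	 * addr &= PMD_MASK yields 0x00200000 and the two calls above
	 * record 0x002ff000 and 0x00300000, stretching the flush range
	 * across both 1MB sections that map this pte page.
	 */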
#endif

	tlb_remove_entry(tlb, pte);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}
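
/*
 * The page-size aware helpers above are pass-throughs: this
 * implementation tracks flush ranges at PAGE_SIZE granularity via
 * tlb_add_flush(), so there is no per-call page-size state to keep.
 */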

#endif /* CONFIG_MMU */
#endif