#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
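
/*
 * Concretely, a hedged caller-side sketch of the same sequence, using the
 * tlb_gather_mmu()/tlb_finish_mmu() signatures of this kernel generation
 * ("unmap_example" is a hypothetical function, not part of the kernel):
 *
 *	static void unmap_example(struct mm_struct *mm,
 *				  unsigned long start, unsigned long end)
 *	{
 *		struct mmu_gather tlb;
 *
 *		tlb_gather_mmu(&tlb, mm, start, end);
 *		// per-vma/per-PTE loop as in the template above
 *		tlb_finish_mmu(&tlb, start, end);
 *	}
 */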
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define	IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;
	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record of a translation register (TR) entry */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
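
/*
 * A hedged usage sketch for the TR helpers above: ia64_itr_entry() pins a
 * translation in an instruction and/or data translation register (selected
 * by target_mask) and returns the slot used (negative on failure);
 * ia64_ptr_entry() purges that slot again.  All values below are
 * illustrative placeholders:
 *
 *	int slot = ia64_itr_entry(target_mask, va, pte, log_size);
 *	if (slot >= 0)
 *		ia64_ptr_entry(target_mask, slot);	// undo the pin
 */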

/*
 * Region register (RR) accessor macros.
 */
#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	(((val) >> 8) & 0xffffff)
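
/*
 * Illustrative sketch (not kernel code): decoding a region register value
 * with the accessors above; ia64_get_rr() is the intrinsic that reads the
 * region register covering a given address:
 *
 *	u64 rr = ia64_get_rr(addr);
 *	int log_ps = RR_TO_PS(rr);	// log2 of the region's page size
 *	u32 rid = RR_TO_RID(rr);	// region ID used to tag TLB entries
 *	int ve = RR_TO_VE(rr);		// VHPT walker enable bit
 */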

static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a terabyte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

}

static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	unsigned long i;
	unsigned int nr;

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}

/*
 * Flush the TLB for address range START to END and release the freed pages that
 * were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}


static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
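	/* start == 0 && end == ~0UL is how a full-mm flush is signalled */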
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
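	/* ~0UL in start_addr means no TLB range has been gathered yet */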
	tlb->start_addr = ~0UL;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
			unsigned long start, unsigned long end, bool force)
{
	if (force)
		tlb->need_flush = 1;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
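	/* returning true tells the caller to flush before gathering more */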
	if (tlb->nr == tlb->max)
		return true;
	return false;
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
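	/* no-op: the ia64 flush logic does not depend on the gathered page size */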
}

#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */