/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines: if
 * the pages were released (step 4) before the TLBs were flushed (step 3), another CPU
 * could still reach a freed page through a stale translation.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
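
/*
 * A minimal sketch of that template in terms of this file's primitives
 * (illustrative only -- the real caller lives in the generic mm code):
 *
 *	struct mmu_gather tlb;
 *
 *	arch_tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);			// no-op on ia64
 *	tlb_remove_tlb_entry(&tlb, ptep, addr);		// record range to flush
 *	tlb_remove_page(&tlb, page);			// queue page, freed after flush
 *	tlb_end_vma(&tlb, vma);				// no-op on ia64
 *	arch_tlb_finish_mmu(&tlb, start, end, false);	// flush TLB, then free pages
 */
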
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define	IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;		/* # of pages gathered so far */
	unsigned int		max;		/* capacity of ->pages[] */
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;
	unsigned long		start_addr;	/* range of unflushed TLB entries */
	unsigned long		end_addr;
	struct page		**pages;	/* batch of pages to free after the flush */
	struct page		*local[IA64_GATHER_BUNDLE];	/* on-stack fallback batch */
};

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record of a translation register (TR) entry */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
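
/*
 * Illustrative sketch (not part of this header's contract): pinning a kernel
 * mapping in a translation register and purging it again.  The target_mask
 * encoding (bit 0 = instruction TRs, bit 1 = data TRs) and the use of
 * PAGE_SHIFT as the log2 mapping size are assumptions for the example only.
 *
 *	int slot = ia64_itr_entry(0x2, va, pte, PAGE_SHIFT);	// pin in a data TR
 *	if (slot >= 0)
 *		ia64_ptr_entry(0x2, slot);			// purge it again
 */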

/*
 * Region register macros.
 */
#define RR_TO_VE(val)   (((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val) 	((val >> 8) & 0xffffff)
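
/*
 * Worked example (the value below is made up purely for illustration):
 *
 *	u64 rr  = 0x1234571UL;
 *	u64 rid = RR_TO_RID(rr);	// 0x012345: region ID from bits 8-31
 *	u64 ps  = RR_TO_PS(rr);		// 0x1c: log2 of preferred page size (256MB)
 *	u64 ve  = RR_TO_VE(rr);		// 0x1: VHPT walker enable bit
 *	u64 rr2 = (rid << 8) | RR_PS(ps) | RR_VE(ve);	// reassemble the fields
 */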

static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely(end - start >= 1024*1024*1024*1024UL
			    || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
		/*
		 * If we flush more than a terabyte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

}
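
/*
 * Worked example for the flush_tlb_all() fallback above (addresses made up):
 * REGION_NUMBER() is the top three address bits, so the two-page range
 * 0x3ffffffffffff000..0x4000000000001000 crosses from region 1 into region 2
 * and is handled by flushing everything rather than page by page.
 */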

static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	unsigned long i;
	unsigned int nr;

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}

/*
 * Flush the TLB for address range START to END and release the freed pages that
 * were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}


static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	tlb->fullmm = !(start | (end+1));	/* true only for start == 0, end == ~0UL */
	tlb->start = start;
	tlb->end = end;
	tlb->start_addr = ~0UL;		/* sentinel: no range gathered yet */
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
			unsigned long start, unsigned long end, bool force)
{
	if (force)
		tlb->need_flush = 1;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
	if (tlb->nr == tlb->max)
		return true;
	return false;
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}
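
/*
 * Worked example: removing three consecutive PTEs at ADDR, ADDR + PAGE_SIZE and
 * ADDR + 2*PAGE_SIZE leaves start_addr == ADDR and end_addr == ADDR + 3*PAGE_SIZE,
 * so the eventual flush covers exactly the range that was touched.
 */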

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
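	/* no-op: ia64's range-based flush does not depend on the page size */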
}

#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */