#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include "tlb_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
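/*
 * The generic mm code drives this roughly as:
 *
 *	tlb = tlb_gather_mmu(mm, full_mm_flush);
 *	for each vma:
 *		tlb_start_vma(tlb, vma);
 *		... tlb_remove_tlb_entry() / tlb_remove_page() per PTE ...
 *		tlb_end_vma(tlb, vma);
 *	tlb_finish_mmu(tlb, start, end);
 */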
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;		/* non-zero when tearing down the entire mm */
	unsigned long		start, end;	/* virtual range pending a TLB flush */
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	init_tlb_gather(tlb);

	return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (tlb->fullmm)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}

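/*
 * Widen the pending flush range to cover the PTE being removed, so that
 * tlb_end_vma() only needs to flush the addresses actually touched.
 */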
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm && tlb->end) {
		flush_tlb_range(vma, tlb->start, tlb->end);
		init_tlb_gather(tlb);
	}
}

#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

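/*
 * TLB wiring: pin the translation for a given address so it cannot be
 * evicted from the TLB.  SH-4 declares these extern (implemented
 * elsewhere in the arch code), SH-5 implements them inline below, and
 * everything else simply BUG()s.
 */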
#ifdef CONFIG_CPU_SH4
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#elif defined(CONFIG_SUPERH64)
/* LIFO stack of currently wired DTLB slots; dtlb_entry is its depth. */
static int dtlb_entry;
static unsigned long long dtlb_entries[64];

static inline void tlb_wire_entry(struct vm_area_struct *vma,
				  unsigned long addr, pte_t pte)
{
	unsigned long long entry;
	unsigned long paddr, flags;

	BUG_ON(dtlb_entry == 64);

	local_irq_save(flags);

	entry = sh64_get_wired_dtlb_entry();
	dtlb_entries[dtlb_entry++] = entry;

	paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
	paddr &= ~PAGE_MASK;

	sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);

	local_irq_restore(flags);
}

static inline void tlb_unwire_entry(void)
{
	unsigned long long entry;
	unsigned long flags;

	BUG_ON(!dtlb_entry);

	local_irq_save(flags);
	entry = dtlb_entries[--dtlb_entry];	/* pop the most recently wired entry */

	sh64_teardown_tlb_slot(entry);
	sh64_put_wired_dtlb_entry(entry);

	local_irq_restore(flags);
}
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma,
				  unsigned long addr, pte_t pte)
{
	BUG();
}

static inline void tlb_unwire_entry(void)
{
	BUG();
}
#endif /* CONFIG_CPU_SH4 */

#else /* CONFIG_MMU */

#define tlb_start_vma(tlb, vma)				do { } while (0)
#define tlb_end_vma(tlb, vma)				do { } while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address)	do { } while (0)
#define tlb_flush(tlb)					do { } while (0)

#include <asm-generic/tlb.h>

#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */