#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
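
/*
 * A minimal sketch of that two step update, assuming the generic Linux
 * pte helpers ptep_get_and_clear() and set_pte_at() (defined elsewhere,
 * not in this file):
 *
 *	old = ptep_get_and_clear(mm, addr, ptep);    i) invalidate (IPTE)
 *	set_pte_at(mm, addr, ptep, new);             ii) store the new pte
 */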

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

struct mmu_gather {
	struct mm_struct *mm;
	struct mmu_table_batch *batch;
	unsigned int fullmm;
	unsigned long start, end;
};

struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

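/*
 * A batch is sized so that the header and its table pointers fit into
 * a single page; the full page is handed to RCU once the batch fills.
 */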
#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

static inline void tlb_gather_mmu(struct mmu_gather *tlb,
				  struct mm_struct *mm,
				  unsigned long start,
				  unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
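	/* start == 0 && end == -1UL is the "flush the whole mm" encoding */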
	tlb->fullmm = !(start | (end+1));
	tlb->batch = NULL;
}
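
/*
 * Rough usage sketch, mirroring the call sequence the generic mm code
 * drives (not code from this file):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... clear ptes, tlb_remove_page() each unmapped page,
 *	    pte_free_tlb() each emptied page table ...
 *	tlb_finish_mmu(&tlb, start, end);    flush TLB, then free tables
 */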

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb,
				  unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);
}

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been flushed, so just do free_page_and_swap_cache.
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
					 struct page *page)
{
	return __tlb_remove_page(tlb, page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two-level page table, the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
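 * With a 2 GB asce_limit the pgd already is the segment table, so
 * freeing the pmd here would free that same table a second time.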
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (tlb->mm->context.asce_limit <= (1UL << 31))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	tlb_remove_table(tlb, pmd);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three-level page table, the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (tlb->mm->context.asce_limit <= (1UL << 42))
		return;
	tlb_remove_table(tlb, pud);
}

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)

#endif /* _S390_TLB_H */