提交 5c474a1e 编写于 作者: M Martin Schwidefsky

s390/mm: introduce ptep_flush_lazy helper

Isolate the logic of IDTE vs. IPTE flushing of ptes in two functions,
ptep_flush_lazy and __tlb_flush_mm_lazy.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
上级 b6bed093
...@@ -77,8 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, ...@@ -77,8 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
WARN_ON(atomic_read(&prev->context.attach_count) < 0); WARN_ON(atomic_read(&prev->context.attach_count) < 0);
atomic_inc(&next->context.attach_count); atomic_inc(&next->context.attach_count);
/* Check for TLBs not flushed yet */ /* Check for TLBs not flushed yet */
if (next->context.flush_mm) __tlb_flush_mm_lazy(next);
__tlb_flush_mm(next);
} }
#define enter_lazy_tlb(mm,tsk) do { } while (0) #define enter_lazy_tlb(mm,tsk) do { } while (0)
......
...@@ -414,12 +414,6 @@ extern unsigned long MODULES_END; ...@@ -414,12 +414,6 @@ extern unsigned long MODULES_END;
#define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_PROTECT) #define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_PROTECT)
#define SEGMENT_WRITE __pgprot(0) #define SEGMENT_WRITE __pgprot(0)
/*
 * Return non-zero when this mm is used exclusively by the current CPU:
 * we are its active user and at most one context is attached, so a
 * local (non-broadcast) TLB flush is sufficient.
 */
static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(current->active_mm == mm &&
		      atomic_read(&mm->context.attach_count) < 2);
}
static inline int mm_has_pgste(struct mm_struct *mm) static inline int mm_has_pgste(struct mm_struct *mm)
{ {
#ifdef CONFIG_PGSTE #ifdef CONFIG_PGSTE
...@@ -1037,6 +1031,17 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep) ...@@ -1037,6 +1031,17 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
} }
} }
/*
 * Flush the TLB entry for a pte, or defer the flush if nobody else
 * can see stale translations: when no CPU other than (possibly) the
 * current one is attached to the mm, just mark the mm as needing a
 * flush (picked up later by __tlb_flush_mm_lazy); otherwise issue
 * the IPTE right away.
 */
static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	/* == comparison already yields 0 or 1 in C */
	int active = (mm == current->active_mm);

	if (atomic_read(&mm->context.attach_count) <= active)
		mm->context.flush_mm = 1;	/* defer: no other user */
	else
		__ptep_ipte(address, ptep);	/* flush immediately */
}
/* /*
* This is hard to understand. ptep_get_and_clear and ptep_clear_flush * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
* both clear the TLB for the unmapped pte. The reason is that * both clear the TLB for the unmapped pte. The reason is that
...@@ -1057,15 +1062,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, ...@@ -1057,15 +1062,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
pgste_t pgste; pgste_t pgste;
pte_t pte; pte_t pte;
mm->context.flush_mm = 1;
if (mm_has_pgste(mm)) { if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep); pgste = pgste_get_lock(ptep);
pgste = pgste_ipte_notify(mm, address, ptep, pgste); pgste = pgste_ipte_notify(mm, address, ptep, pgste);
} }
pte = *ptep; pte = *ptep;
if (!mm_exclusive(mm)) ptep_flush_lazy(mm, address, ptep);
__ptep_ipte(address, ptep);
pte_val(*ptep) = _PAGE_INVALID; pte_val(*ptep) = _PAGE_INVALID;
if (mm_has_pgste(mm)) { if (mm_has_pgste(mm)) {
...@@ -1083,15 +1086,13 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, ...@@ -1083,15 +1086,13 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
pgste_t pgste; pgste_t pgste;
pte_t pte; pte_t pte;
mm->context.flush_mm = 1;
if (mm_has_pgste(mm)) { if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep); pgste = pgste_get_lock(ptep);
pgste_ipte_notify(mm, address, ptep, pgste); pgste_ipte_notify(mm, address, ptep, pgste);
} }
pte = *ptep; pte = *ptep;
if (!mm_exclusive(mm)) ptep_flush_lazy(mm, address, ptep);
__ptep_ipte(address, ptep);
if (mm_has_pgste(mm)) { if (mm_has_pgste(mm)) {
pgste = pgste_update_all(&pte, pgste); pgste = pgste_update_all(&pte, pgste);
...@@ -1160,7 +1161,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, ...@@ -1160,7 +1161,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
pte = *ptep; pte = *ptep;
if (!full) if (!full)
__ptep_ipte(address, ptep); ptep_flush_lazy(mm, address, ptep);
pte_val(*ptep) = _PAGE_INVALID; pte_val(*ptep) = _PAGE_INVALID;
if (!full && mm_has_pgste(mm)) { if (!full && mm_has_pgste(mm)) {
...@@ -1178,14 +1179,12 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm, ...@@ -1178,14 +1179,12 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
pte_t pte = *ptep; pte_t pte = *ptep;
if (pte_write(pte)) { if (pte_write(pte)) {
mm->context.flush_mm = 1;
if (mm_has_pgste(mm)) { if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep); pgste = pgste_get_lock(ptep);
pgste = pgste_ipte_notify(mm, address, ptep, pgste); pgste = pgste_ipte_notify(mm, address, ptep, pgste);
} }
if (!mm_exclusive(mm)) ptep_flush_lazy(mm, address, ptep);
__ptep_ipte(address, ptep);
pte = pte_wrprotect(pte); pte = pte_wrprotect(pte);
if (mm_has_pgste(mm)) { if (mm_has_pgste(mm)) {
......
...@@ -63,13 +63,14 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb, ...@@ -63,13 +63,14 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
static inline void tlb_flush_mmu(struct mmu_gather *tlb) static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{ {
__tlb_flush_mm_lazy(tlb->mm);
tlb_table_flush(tlb); tlb_table_flush(tlb);
} }
static inline void tlb_finish_mmu(struct mmu_gather *tlb, static inline void tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
tlb_table_flush(tlb); tlb_flush_mmu(tlb);
} }
/* /*
......
...@@ -86,7 +86,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) ...@@ -86,7 +86,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
__tlb_flush_full(mm); __tlb_flush_full(mm);
} }
static inline void __tlb_flush_mm_cond(struct mm_struct * mm) static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{ {
if (mm->context.flush_mm) { if (mm->context.flush_mm) {
__tlb_flush_mm(mm); __tlb_flush_mm(mm);
...@@ -118,13 +118,13 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm) ...@@ -118,13 +118,13 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
static inline void flush_tlb_mm(struct mm_struct *mm) static inline void flush_tlb_mm(struct mm_struct *mm)
{ {
__tlb_flush_mm_cond(mm); __tlb_flush_mm_lazy(mm);
} }
static inline void flush_tlb_range(struct vm_area_struct *vma, static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end) unsigned long start, unsigned long end)
{ {
__tlb_flush_mm_cond(vma->vm_mm); __tlb_flush_mm_lazy(vma->vm_mm);
} }
static inline void flush_tlb_kernel_range(unsigned long start, static inline void flush_tlb_kernel_range(unsigned long start,
......
...@@ -1008,7 +1008,6 @@ void tlb_table_flush(struct mmu_gather *tlb) ...@@ -1008,7 +1008,6 @@ void tlb_table_flush(struct mmu_gather *tlb)
struct mmu_table_batch **batch = &tlb->batch; struct mmu_table_batch **batch = &tlb->batch;
if (*batch) { if (*batch) {
__tlb_flush_mm(tlb->mm);
call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL; *batch = NULL;
} }
...@@ -1018,11 +1017,12 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) ...@@ -1018,11 +1017,12 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
{ {
struct mmu_table_batch **batch = &tlb->batch; struct mmu_table_batch **batch = &tlb->batch;
tlb->mm->context.flush_mm = 1;
if (*batch == NULL) { if (*batch == NULL) {
*batch = (struct mmu_table_batch *) *batch = (struct mmu_table_batch *)
__get_free_page(GFP_NOWAIT | __GFP_NOWARN); __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) { if (*batch == NULL) {
__tlb_flush_mm(tlb->mm); __tlb_flush_mm_lazy(tlb->mm);
tlb_remove_table_one(table); tlb_remove_table_one(table);
return; return;
} }
...@@ -1030,7 +1030,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) ...@@ -1030,7 +1030,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
} }
(*batch)->tables[(*batch)->nr++] = table; (*batch)->tables[(*batch)->nr++] = table;
if ((*batch)->nr == MAX_TABLE_BATCH) if ((*batch)->nr == MAX_TABLE_BATCH)
tlb_table_flush(tlb); tlb_flush_mmu(tlb);
} }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifdef CONFIG_TRANSPARENT_HUGEPAGE
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册