Commit 9b0de864 authored by Will Deacon

arm64: mm: Invalidate both kernel and user ASIDs when performing TLBI

Since an mm has both a kernel and a user ASID, we need to ensure that
broadcast TLB maintenance targets both address spaces so that things
like CoW continue to work with the uaccess primitives in the kernel.
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Laura Abbott <labbott@redhat.com>
Tested-by: Shanker Donthineni <shankerd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Parent fc0e1299
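For context (not part of the commit itself): with kernel page-table isolation, each mm carries a pair of ASIDs, and a TLBI operand encodes the ASID in its upper bits, so the user half of the pair can be reached by OR-ing one extra bit into the operand already built for the kernel half. Below is a minimal standalone sketch of that encoding, assuming the pairing scheme used by the KPTI series (USER_ASID_FLAG at bit 48, kernel ASID even, user ASID = kernel ASID | 1); the addresses and ASID values are made up for illustration.

/*
 * Standalone illustration (not kernel code): how the same TLBI operand is
 * retargeted at the user ASID under the assumed KPTI pairing scheme.
 */
#include <stdint.h>
#include <stdio.h>

#define USER_ASID_FLAG	(1ULL << 48)	/* bit 0 of the ASID field (assumption) */

/* Build a VA+ASID operand the way flush_tlb_page() does: VA >> 12 | ASID << 48 */
static uint64_t tlbi_arg(uint64_t vaddr, uint16_t kernel_asid)
{
	return (vaddr >> 12) | ((uint64_t)kernel_asid << 48);
}

int main(void)
{
	uint64_t kern = tlbi_arg(0x0000ffffdeadb000ULL, 0x42);	/* even: kernel half */
	uint64_t user = kern | USER_ASID_FLAG;			/* odd:  user half   */

	printf("kernel-ASID operand: %#018llx\n", (unsigned long long)kern);
	printf("user-ASID operand:   %#018llx\n", (unsigned long long)user);
	return 0;
}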
@@ -23,6 +23,7 @@
 #include <linux/sched.h>
 #include <asm/cputype.h>
+#include <asm/mmu.h>
 /*
  * Raw TLBI operations.
@@ -54,6 +55,11 @@
 #define __tlbi(op, ...)	__TLBI_N(op, ##__VA_ARGS__, 1, 0)
+#define __tlbi_user(op, arg) do {				\
+	if (arm64_kernel_unmapped_at_el0())			\
+		__tlbi(op, (arg) | USER_ASID_FLAG);		\
+} while (0)
 /*
  * TLB Management
  * ==============
@@ -115,6 +121,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	dsb(ishst);
 	__tlbi(aside1is, asid);
+	__tlbi_user(aside1is, asid);
 	dsb(ish);
 }
@@ -125,6 +132,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 	dsb(ishst);
 	__tlbi(vale1is, addr);
+	__tlbi_user(vale1is, addr);
 	dsb(ish);
 }
@@ -151,10 +159,13 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ishst);
 	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
-		if (last_level)
+		if (last_level) {
 			__tlbi(vale1is, addr);
-		else
+			__tlbi_user(vale1is, addr);
+		} else {
 			__tlbi(vae1is, addr);
+			__tlbi_user(vae1is, addr);
+		}
 	}
 	dsb(ish);
 }
@@ -194,6 +205,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
 	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
 	__tlbi(vae1is, addr);
+	__tlbi_user(vae1is, addr);
 	dsb(ish);
 }
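To see why both halves of the ASID pair matter for something like CoW, here is a toy userspace model (hypothetical code, not from the kernel): it mimics a TLB keyed by (ASID, VA) and shows that a flush naming only the kernel ASID leaves the user ASID's stale entry in place, which is exactly what the added __tlbi_user() calls avoid.

/*
 * Toy model (hypothetical, for illustration only): a CoW fault changes a
 * translation, but userspace runs under a different ASID than the kernel's
 * uaccess path, so a flush that names only one ASID of the pair leaves a
 * stale entry behind for the other.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tlb_entry { uint16_t asid; uint64_t va; uint64_t pa; bool valid; };

static struct tlb_entry tlb[4];

static void tlb_fill(int i, uint16_t asid, uint64_t va, uint64_t pa)
{
	tlb[i] = (struct tlb_entry){ .asid = asid, .va = va, .pa = pa, .valid = true };
}

/* Model of a VALE1IS-style op: invalidate one VA under one ASID. */
static void tlbi_va_asid(uint16_t asid, uint64_t va)
{
	for (int i = 0; i < 4; i++)
		if (tlb[i].valid && tlb[i].asid == asid && tlb[i].va == va)
			tlb[i].valid = false;
}

int main(void)
{
	const uint16_t kernel_asid = 0x42;	/* even: kernel half (assumption) */
	const uint16_t user_asid   = 0x42 | 1;	/* odd:  user half   (assumption) */
	const uint64_t va = 0xdeadb000;

	/* Both halves of the pair have cached the pre-CoW translation. */
	tlb_fill(0, kernel_asid, va, 0x1000);
	tlb_fill(1, user_asid,   va, 0x1000);

	/* Flushing only the kernel ASID (the pre-patch behaviour)... */
	tlbi_va_asid(kernel_asid, va);
	printf("user entry still valid after kernel-only flush: %d\n", tlb[1].valid);

	/* ...so the patch issues the companion flush as well. */
	tlbi_va_asid(user_asid, va);
	printf("user entry valid after flushing both ASIDs:     %d\n", tlb[1].valid);
	return 0;
}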