Commit 8678c1f0 authored by Russell King, committed by Russell King

[ARM] Fix ASID version switch

Close a hole in the ASID version switch, particularly the following
scenario:

CPU0 MM PID			CPU1 MM PID
	idle
				  A	pid(A)
				  A	idle(lazy tlb)
		* new asid version triggered by B *
  B	pid(B)
  A	pid(A)
		* MM A gets new asid version *
  A	idle(lazy tlb)
				  A	pid(A)
		* CPU1 doesn't see the new ASID *

The result is that CPU1 continues running with the hardware set for
the original (stale) ASID value, while mm->context.id holds the new
ASID value.  The next MM fault on CPU1 then updates the page table
entries, but flush_tlb_page() fails because the hardware is still
using the wrong ASID.
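
For context, the stale hardware ASID is invisible to the software
check.  check_context() in include/asm-arm/mmu_context.h of this era
(a simplified sketch; the __check_kvm_seq() test is omitted here)
compares only the version bits of mm->context.id against
cpu_last_asid:

static inline void check_context(struct mm_struct *mm)
{
	/*
	 * Only the version bits (above ASID_BITS) are compared, so
	 * once mm->context.id already holds the new version, a CPU
	 * whose *hardware* context ID register is stale passes this
	 * check and never reloads the ASID.
	 */
	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
		__new_context(mm);
}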

There is a related case where a threaded application is allocated
a new ASID on one CPU while another of its threads is running on
a different CPU.  This scenario is not fixed by this commit.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 08fdffd4
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -14,7 +14,8 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-unsigned int cpu_last_asid = { 1 << ASID_BITS };
+static DEFINE_SPINLOCK(cpu_asid_lock);
+unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 
 /*
  * We fork()ed a process, and we need a new context for the child
@@ -31,15 +32,16 @@ void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
+	spin_lock(&cpu_asid_lock);
 	asid = ++cpu_last_asid;
 	if (asid == 0)
-		asid = cpu_last_asid = 1 << ASID_BITS;
+		asid = cpu_last_asid = ASID_FIRST_VERSION;
 
 	/*
 	 * If we've used up all our ASIDs, we need
 	 * to start a new version and flush the TLB.
 	 */
-	if ((asid & ~ASID_MASK) == 0) {
+	if (unlikely((asid & ~ASID_MASK) == 0)) {
 		asid = ++cpu_last_asid;
 		/* set the reserved ASID before flushing the TLB */
 		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
@@ -48,6 +50,8 @@ void __new_context(struct mm_struct *mm)
 		isb();
 		flush_tlb_all();
 	}
+	spin_unlock(&cpu_asid_lock);
 
+	mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
 	mm->context.id = asid;
 }
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -36,8 +36,9 @@ void __check_kvm_seq(struct mm_struct *mm);
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
  */
 #define ASID_BITS	8
 #define ASID_MASK	((~0) << ASID_BITS)
+#define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
 
@@ -96,8 +97,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #ifdef CONFIG_MMU
 	unsigned int cpu = smp_processor_id();
 
-	if (prev != next) {
-		cpu_set(cpu, next->cpu_vm_mask);
+	if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
 		if (cache_is_vivt())
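
How the new condition closes the hole: cpu_test_and_set() atomically
sets this CPU's bit in next->cpu_vm_mask and returns the bit's
previous value, and __new_context() now resets cpu_vm_mask to just
the CPU that allocated the new ASID.  A sketch of the resulting
switch_mm() path (the comments are editorial, not from the diff):

	unsigned int cpu = smp_processor_id();

	/*
	 * A CPU returning to this mm after __new_context() ran on
	 * another CPU finds its bit clear (cpu_vm_mask was reset to
	 * the allocating CPU), so the branch is taken even in the
	 * lazy-tlb case where prev == next, and the hardware picks
	 * up the new ASID.
	 */
	if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
		check_context(next);		/* refresh ASID if version is stale */
		cpu_switch_mm(next->pgd, next);	/* program page tables and ASID */
	}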