Commit 5ea72a90 authored by Vineet Gupta

ARC: [SMP] TLB flush

- Add mm_cpumask setting (aggregating only, unlike some other arches)
  used to restrict the TLB flush cross-calling

- cross-calling versions of TLB flush routines (thanks to Noam)
Signed-off-by: Noam Camus <noamc@ezchip.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Parent: 63eca94c
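Before the diff itself, here is a minimal standalone sketch of the "aggregating only" mm_cpumask idea from the first bullet. It is illustration only, not part of the commit: the 8-CPU mask width, the variable mm_cpumask_t1 and the helpers switch_in()/show_ipi_targets() are invented stand-ins for cpumask_set_cpu() and the on_each_cpu_mask() fan-out; the C1/C2/C3, T1/T2 scenario follows the switch_mm() comment in the diff below.

#include <stdio.h>

#define NR_CPUS 8

static unsigned int mm_cpumask_t1;	/* one bit per CPU, like a cpumask */

/* models cpumask_set_cpu(cpu, mm_cpumask(next)) done in switch_mm() */
static void switch_in(int cpu)
{
	mm_cpumask_t1 |= 1u << cpu;
}

/* models the on_each_cpu_mask() fan-out done by the flush_tlb_*() wrappers */
static void show_ipi_targets(const char *what)
{
	printf("%s: IPI sent to", what);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mm_cpumask_t1 & (1u << cpu))
			printf(" C%d", cpu);
	printf("\n");
}

int main(void)
{
	switch_in(1);	/* T1 runs on C1 ...       */
	switch_in(3);	/* ... then migrates to C3 */
	switch_in(2);	/* T2 (same mm) runs on C2 */

	/* T2 munmaps: the flush must also reach C1's possibly stale entries */
	show_ipi_targets("aggregating mm_cpumask");	/* -> C1 C2 C3 */
	return 0;
}

Because bits are only ever set and never cleared when a task migrates away, the IPI reaches every CPU that may still hold stale TLB entries for the mm. The commit's actual diff follows.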
@@ -80,7 +80,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	/* move to new ASID and handle rollover */
 	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
-		flush_tlb_all();
+		local_flush_tlb_all();
 		/*
 		 * Above checke for rollover of 8 bit ASID in 32 bit container.
@@ -131,6 +131,21 @@ static inline void destroy_context(struct mm_struct *mm)
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
+	const int cpu = smp_processor_id();
+
+	/*
+	 * Note that the mm_cpumask is "aggregating" only, we don't clear it
+	 * for the switched-out task, unlike some other arches.
+	 * It is used to enlist cpus for sending TLB flush IPIs and not sending
+	 * it to CPUs where a task once ran-on, could cause stale TLB entry
+	 * re-use, specially for a multi-threaded task.
+	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
+	 *      For a non-aggregating mm_cpumask, IPI not sent C1, and if T1
+	 *      were to re-migrate to C1, it could access the unmapped region
+	 *      via any existing stale TLB entries.
+	 */
+	cpumask_set_cpu(cpu, mm_cpumask(next));
+
 #ifndef CONFIG_SMP
 	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
@@ -18,11 +18,18 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 void local_flush_tlb_range(struct vm_area_struct *vma,
 			   unsigned long start, unsigned long end);
-/* XXX: Revisit for SMP */
+#ifndef CONFIG_SMP
 #define flush_tlb_range(vma, s, e)	local_flush_tlb_range(vma, s, e)
 #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
 #define flush_tlb_kernel_range(s, e)	local_flush_tlb_kernel_range(s, e)
 #define flush_tlb_all()		local_flush_tlb_all()
 #define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#else
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+#endif /* CONFIG_SMP */
 #endif
@@ -128,6 +128,7 @@ void start_kernel_secondary(void)
 	atomic_inc(&mm->mm_users);
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
@@ -363,6 +363,79 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	local_irq_restore(flags);
 }
+
+#ifdef CONFIG_SMP
+
+struct tlb_args {
+	struct vm_area_struct *ta_vma;
+	unsigned long ta_start;
+	unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+	struct tlb_args *ta = arg;
+
+	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+	struct tlb_args *ta = arg;
+
+	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+	struct tlb_args *ta = (struct tlb_args *)arg;
+
+	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
+			 mm, 1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+	struct tlb_args ta = {
+		.ta_vma = vma,
+		.ta_start = uaddr
+	};
+
+	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	struct tlb_args ta = {
+		.ta_vma = vma,
+		.ta_start = start,
+		.ta_end = end
+	};
+
+	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	struct tlb_args ta = {
+		.ta_start = start,
+		.ta_end = end
+	};
+
+	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+}
+#endif
+
 /*
  * Routine to create a TLB entry
  */