Commit 56f8ba83 authored by Rusty Russell

cpumask: use mm_cpumask() wrapper: arm

Makes code futureproof against the impending change to mm->cpu_vm_mask.

It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Parent a6a01063
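As background for readers of this diff, here is a minimal sketch (not part of the patch) of the conversion pattern the commit message describes: the deprecated cpu_* helpers operate on the cpumask_t value embedded in struct mm_struct, while the cpumask_* helpers take a struct cpumask pointer obtained through the mm_cpumask() accessor. The function name example_track_cpu below is illustrative only.

/* Illustrative sketch only; assumes a kernel build environment. */
#include <linux/cpumask.h>
#include <linux/mm_types.h>

static void example_track_cpu(struct mm_struct *mm, unsigned int cpu)
{
        /* Old, deprecated style: pass the cpumask_t value directly, e.g.
         * cpu_set(cpu, mm->cpu_vm_mask) or
         * cpu_isset(cpu, mm->cpu_vm_mask). */

        /* New style: mm_cpumask(mm) yields a struct cpumask *, and the
         * cpumask_*() helpers take that pointer. */
        cpumask_set_cpu(cpu, mm_cpumask(mm));

        if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
                cpumask_clear_cpu(cpu, mm_cpumask(mm));
}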
@@ -334,14 +334,14 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 #ifndef CONFIG_CPU_CACHE_VIPT
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
-        if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                 __cpuc_flush_user_all();
 }
 static inline void
 flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                         vma->vm_flags);
 }
@@ -349,7 +349,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 static inline void
 flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 unsigned long addr = user_addr & PAGE_MASK;
                 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
         }
@@ -360,7 +360,7 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                     unsigned long uaddr, void *kaddr,
                     unsigned long len, int write)
 {
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 unsigned long addr = (unsigned long)kaddr;
                 __cpuc_coherent_kern_range(addr, addr + len);
         }
...
@@ -103,14 +103,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #ifdef CONFIG_SMP
         /* check for possible thread migration */
-        if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+        if (!cpumask_empty(mm_cpumask(next)) &&
+            !cpumask_test_cpu(cpu, mm_cpumask(next)))
                 __flush_icache_all();
 #endif
-        if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
+        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
                 check_context(next);
                 cpu_switch_mm(next->pgd, next);
                 if (cache_is_vivt())
-                        cpu_clear(cpu, prev->cpu_vm_mask);
+                        cpumask_clear_cpu(cpu, mm_cpumask(prev));
         }
 #endif
 }
...
@@ -350,7 +350,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
         if (tlb_flag(TLB_WB))
                 dsb();
-        if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                 if (tlb_flag(TLB_V3_FULL))
                         asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
                 if (tlb_flag(TLB_V4_U_FULL))
@@ -388,7 +388,7 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
         if (tlb_flag(TLB_WB))
                 dsb();
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                 if (tlb_flag(TLB_V3_PAGE))
                         asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
                 if (tlb_flag(TLB_V4_U_PAGE))
...
@@ -189,7 +189,7 @@ int __cpuexit __cpu_disable(void)
         read_lock(&tasklist_lock);
         for_each_process(p) {
                 if (p->mm)
-                        cpu_clear(cpu, p->mm->cpu_vm_mask);
+                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
         }
         read_unlock(&tasklist_lock);
@@ -257,7 +257,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
         atomic_inc(&mm->mm_users);
         atomic_inc(&mm->mm_count);
         current->active_mm = mm;
-        cpu_set(cpu, mm->cpu_vm_mask);
+        cpumask_set_cpu(cpu, mm_cpumask(mm));
         cpu_switch_mm(mm->pgd, mm);
         enter_lazy_tlb(mm, current);
         local_flush_tlb_all();
@@ -643,7 +643,7 @@ void flush_tlb_all(void)
 void flush_tlb_mm(struct mm_struct *mm)
 {
         if (tlb_ops_need_broadcast())
-                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
         else
                 local_flush_tlb_mm(mm);
 }
@@ -654,7 +654,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                 struct tlb_args ta;
                 ta.ta_vma = vma;
                 ta.ta_start = uaddr;
-                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
         } else
                 local_flush_tlb_page(vma, uaddr);
 }
@@ -677,7 +677,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
                 ta.ta_vma = vma;
                 ta.ta_start = start;
                 ta.ta_end = end;
-                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
         } else
                 local_flush_tlb_range(vma, start, end);
 }
...
@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
         }
         spin_unlock(&cpu_asid_lock);
-        mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+        cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
         mm->context.id = asid;
 }
@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 void flush_cache_mm(struct mm_struct *mm)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                         __cpuc_flush_user_all();
                 return;
         }
@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                         __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                                 vma->vm_flags);
                 return;
@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                         unsigned long addr = user_addr & PAGE_MASK;
                         __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
                 }
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                          unsigned long len, int write)
 {
         if (cache_is_vivt()) {
-                if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                         unsigned long addr = (unsigned long)kaddr;
                         __cpuc_coherent_kern_range(addr, addr + len);
                 }
@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
         }
         /* VIPT non-aliasing cache */
-        if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
             vma->vm_flags & VM_EXEC) {
                 unsigned long addr = (unsigned long)kaddr;
                 /* only flushing the kernel mapping on non-aliasing VIPT */
...