Commit b5eb5511 authored by Ralf Baechle

[MIPS] Kill num_online_cpus() loops.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Parent bd6aeeff
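The loops removed below count from 0 to num_online_cpus()-1 and therefore assume that the online CPU numbers form a contiguous range starting at 0 (the smtc.c comment even calls this a hack); for_each_online_cpu() walks the online cpumask instead, so it stays correct when there are holes in the CPU numbering. A minimal userspace sketch of the difference (not kernel code; the mask value and CPU count are made up for illustration):

/* Illustrative userspace sketch, not kernel code: why a 0..num_online-1 loop
 * breaks once the online CPU IDs are no longer contiguous. */
#include <stdio.h>

#define NR_CPUS 8

int main(void)
{
	/* Pretend CPUs 0, 1, 3 and 6 are online; CPU 2 is offline (a hole). */
	unsigned int online_mask = (1u << 0) | (1u << 1) | (1u << 3) | (1u << 6);
	int num_online = __builtin_popcount(online_mask);	/* 4 */
	int i;

	/* Old pattern: assumes the online CPUs are numbered 0..num_online-1. */
	printf("count-based loop: ");
	for (i = 0; i < num_online; i++)
		printf("%d ", i);	/* visits offline CPU 2 and never reaches CPU 6 */
	printf("\n");

	/* New pattern: walk the online mask itself, as for_each_online_cpu() does. */
	printf("mask-based loop:  ");
	for (i = 0; i < NR_CPUS; i++)
		if (online_mask & (1u << i))
			printf("%d ", i);
	printf("\n");

	return 0;
}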
@@ -769,7 +769,7 @@ void handle_exception(struct gdb_regs *regs)
 	/*
 	 * acquire the CPU spinlocks
 	 */
-	for (i = num_online_cpus()-1; i >= 0; i--)
+	for_each_online_cpu(i)
 		if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
 			panic("kgdb: couldn't get cpulock %d\n", i);
@@ -1044,7 +1044,7 @@ void handle_exception(struct gdb_regs *regs)
 exit_kgdb_exception:
 	/* release locks so other CPUs can go */
-	for (i = num_online_cpus()-1; i >= 0; i--)
+	for_each_online_cpu(i)
 		__raw_spin_unlock(&kgdb_cpulock[i]);
 	spin_unlock(&kgdb_lock);
......
@@ -375,10 +375,13 @@ void flush_tlb_mm(struct mm_struct *mm)
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_online_cpu(cpu)
+			if (cpu_context(cpu, mm))
+				cpu_context(cpu, mm) = 0;
 	}
 	local_flush_tlb_mm(mm);
@@ -411,10 +414,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 		fd.addr2 = end;
 		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_online_cpu(cpu)
+			if (cpu_context(cpu, mm))
+				cpu_context(cpu, mm) = 0;
 	}
 	local_flush_tlb_range(vma, start, end);
 	preempt_enable();
@@ -453,10 +459,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 		fd.addr1 = page;
 		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
 	} else {
-		int i;
-		for (i = 0; i < num_online_cpus(); i++)
-			if (smp_processor_id() != i)
-				cpu_context(i, vma->vm_mm) = 0;
+		cpumask_t mask = cpu_online_map;
+		unsigned int cpu;
+
+		cpu_clear(smp_processor_id(), mask);
+		for_each_online_cpu(cpu)
+			if (cpu_context(cpu, vma->vm_mm))
+				cpu_context(cpu, vma->vm_mm) = 0;
 	}
 	local_flush_tlb_page(vma, page);
 	preempt_enable();
......
@@ -1264,7 +1264,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	if (cpu_has_vtag_icache)
 		flush_icache_all();
 	/* Traverse all online CPUs (hack requires contigous range) */
-	for (i = 0; i < num_online_cpus(); i++) {
+	for_each_online_cpu(i) {
 		/*
 		 * We don't need to worry about our own CPU, nor those of
 		 * CPUs who don't share our TLB.
@@ -1293,7 +1293,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 	/*
 	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
 	 */
-	for (i = 0; i < num_online_cpus(); i++) {
+	for_each_online_cpu(i) {
 		if ((smtc_status & SMTC_TLB_SHARED) ||
 		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
 			cpu_context(i, mm) = asid_cache(i) = asid;
......
@@ -120,7 +120,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int i;
-	for (i = 0; i < num_online_cpus(); i++)
+	for_each_online_cpu(i)
 		cpu_context(i, mm) = 0;
 	return 0;
@@ -284,7 +284,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 	int i;
 	/* SMTC shares the TLB (and ASIDs) across VPEs */
-	for (i = 0; i < num_online_cpus(); i++) {
+	for_each_online_cpu(i) {
 		if((smtc_status & SMTC_TLB_SHARED)
 		    || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
 			cpu_context(i, mm) = 0;
......