diff --git a/arch/sw_64/include/asm/hw_init.h b/arch/sw_64/include/asm/hw_init.h
index 545e9a99a49cd23f0f34c72f4027949bde98b9ed..a36c811839ea6431009f316677782625716df0cc 100644
--- a/arch/sw_64/include/asm/hw_init.h
+++ b/arch/sw_64/include/asm/hw_init.h
@@ -20,8 +20,6 @@ struct cache_desc {
 struct cpuinfo_sw64 {
	unsigned long loops_per_jiffy;
	unsigned long last_asn;
-	int need_new_asn;
-	int asn_lock;
	unsigned long ipi_count;
	struct cache_desc icache;	/* Primary I-cache */
	struct cache_desc dcache;	/* Primary D or combined I/D cache */
diff --git a/arch/sw_64/include/asm/mmu_context.h b/arch/sw_64/include/asm/mmu_context.h
index 10199db1d63790dd6708c1f3e121eae8d135c64d..84e84048a3ba6527941b1b1271c1fdcafdddda3a 100644
--- a/arch/sw_64/include/asm/mmu_context.h
+++ b/arch/sw_64/include/asm/mmu_context.h
@@ -13,10 +13,9 @@
 #include

 /*
- * Force a context reload. This is needed when we change the page
- * table pointer or when we update the ASID of the current process.
+ * Load an mm context. This is needed when we change the page
+ * table pointer (CSR:PTBR) or when we update the ASID (CSR:UPN).
  *
- * CSR:UPN holds ASID and CSR:PTBR holds page table pointer.
  */
 #define load_asn_ptbr	load_mm

@@ -69,17 +68,13 @@ __get_new_mm_context(struct mm_struct *mm, long cpu)
 }

 static inline void
-switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
-	  struct task_struct *next)
+switch_mm_irqs_off(struct mm_struct *prev_mm, struct mm_struct *next_mm,
+		   struct task_struct *next)
 {
	/* Check if our ASN is of an older version, and thus invalid. */
	unsigned long asn, mmc, ptbr;
	long cpu = smp_processor_id();

-#ifdef CONFIG_SMP
-	cpu_data[cpu].asn_lock = 1;
-	barrier();
-#endif
	asn = cpu_last_asn(cpu);
	mmc = next_mm->context.asid[cpu];
	if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
@@ -87,10 +82,6 @@ switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
		mmc = __get_new_mm_context(next_mm, cpu);
		next_mm->context.asid[cpu] = mmc;
	}
-#ifdef CONFIG_SMP
-	else
-		cpu_data[cpu].need_new_asn = 1;
-#endif

	/*
	 * Update CSR:UPN and CSR:PTBR. Another thread may have allocated
@@ -102,31 +93,20 @@ switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
	load_asn_ptbr(asn, ptbr);
 }

-extern void __load_new_mm_context(struct mm_struct *);
-
-#ifdef CONFIG_SMP
-#define check_mmu_context()					\
-do {								\
-	int cpu = smp_processor_id();				\
-	cpu_data[cpu].asn_lock = 0;				\
-	barrier();						\
-	if (cpu_data[cpu].need_new_asn) {			\
-		struct mm_struct *mm = current->active_mm;	\
-		cpu_data[cpu].need_new_asn = 0;			\
-		if (!mm->context.asid[cpu])			\
-			__load_new_mm_context(mm);		\
-	}							\
-} while (0)
-#else
-#define check_mmu_context()	do { } while (0)
-#endif
+#define switch_mm_irqs_off switch_mm_irqs_off

-static inline void activate_mm(struct mm_struct *prev_mm,
-			       struct mm_struct *next_mm)
+static inline void
+switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
+	  struct task_struct *tsk)
 {
-	__load_new_mm_context(next_mm);
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev_mm, next_mm, tsk);
+	local_irq_restore(flags);
 }

+#define activate_mm(prev, next) switch_mm(prev, next, current)
 #define deactivate_mm(tsk, mm)	do { } while (0)

 static inline int init_new_context(struct task_struct *tsk,
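A note on the #define switch_mm_irqs_off switch_mm_irqs_off line added above: this is the usual kernel opt-in idiom. The generic header only supplies an IRQ-disabling fallback when the architecture has not claimed the name itself, roughly like the following (paraphrased from include/linux/mmu_context.h; not part of this patch):

/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
#define switch_mm_irqs_off switch_mm_irqs_off
static inline void
switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
		   struct task_struct *tsk)
{
	switch_mm(prev, next, tsk);
}
#endif

Defining the macro here lets callers that already run with IRQs disabled (notably the scheduler's context switch) call the sw_64 switch_mm_irqs_off() directly and skip the local_irq_save()/local_irq_restore() pair in switch_mm().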
diff --git a/arch/sw_64/include/asm/switch_to.h b/arch/sw_64/include/asm/switch_to.h
index 967fe1d680da3be48842b914f692912bcbdcd2d6..e5596a735b2dbb4ef3fc98b86953db3c2666cd28 100644
--- a/arch/sw_64/include/asm/switch_to.h
+++ b/arch/sw_64/include/asm/switch_to.h
@@ -48,7 +48,6 @@ do {							\
	struct task_struct *__prev = (prev);		\
	struct task_struct *__next = (next);		\
	__switch_to_aux(__prev, __next);		\
	(last) = __switch_to(__prev, __next);		\
-	check_mmu_context();				\
 } while (0)

diff --git a/arch/sw_64/include/asm/tlbflush.h b/arch/sw_64/include/asm/tlbflush.h
index e508a4d66d3712b31cf4f7fd9f92a4796b14d671..b35af83e6ec271f27819b2f4a5c29ea13d16d0bf 100644
--- a/arch/sw_64/include/asm/tlbflush.h
+++ b/arch/sw_64/include/asm/tlbflush.h
@@ -8,13 +8,26 @@
 #include
 #include
 #include
-
-extern void __load_new_mm_context(struct mm_struct *);
-
+#include <asm/mmu_context.h>

 static inline void flush_tlb_current(struct mm_struct *mm)
 {
-	__load_new_mm_context(mm);
+	unsigned long mmc, asn, ptbr, flags;
+
+	local_irq_save(flags);
+
+	mmc = __get_new_mm_context(mm, smp_processor_id());
+	mm->context.asid[smp_processor_id()] = mmc;
+
+	/*
+	 * Force a new ASN for the task. There is currently no way to
+	 * write CSR:UPN alone, so call load_asn_ptbr() here.
+	 */
+	asn = mmc & HARDWARE_ASN_MASK;
+	ptbr = virt_to_pfn(mm->pgd);
+	load_asn_ptbr(asn, ptbr);
+
+	local_irq_restore(flags);
 }

 /*
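flush_tlb_current() now allocates a fresh context inline instead of calling the removed __load_new_mm_context(). The staleness test this relies on, (mmc ^ asn) & ~HARDWARE_ASN_MASK in switch_mm_irqs_off() above, works because the per-cpu last_asn counter packs a generation count above the hardware ASN bits. A self-contained illustration follows; the EX_* widths are invented for the example and are not the real sw_64 register layout:

#include <stdio.h>

#define EX_HARDWARE_ASN_MASK	0xffUL		/* low bits: hardware ASN */
#define EX_ASN_FIRST_VERSION	(1UL << 8)	/* generation counts above them */

static int asn_is_stale(unsigned long mmc, unsigned long last_asn)
{
	/* Different generation bits mean the cached ASID is invalid. */
	return ((mmc ^ last_asn) & ~EX_HARDWARE_ASN_MASK) != 0;
}

int main(void)
{
	unsigned long mmc = EX_ASN_FIRST_VERSION + 3;	/* mm's cached asid */

	/* Allocated in the current generation: still valid. */
	printf("%d\n", asn_is_stale(mmc, EX_ASN_FIRST_VERSION + 5));	/* 0 */

	/* The per-cpu counter has rolled into a new generation: stale. */
	printf("%d\n", asn_is_stale(mmc, 2 * EX_ASN_FIRST_VERSION + 3));	/* 1 */
	return 0;
}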
diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c
index 39103e4edee43781e6c78833b5d97e1fd17a9508..cb04aaa0cb971ea856c2e4461b0befae38859bcc 100644
--- a/arch/sw_64/kernel/setup.c
+++ b/arch/sw_64/kernel/setup.c
@@ -145,8 +145,6 @@ void store_cpu_data(int cpu)
 {
	cpu_data[cpu].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpu].last_asn = ASN_FIRST_VERSION;
-	cpu_data[cpu].need_new_asn = 0;
-	cpu_data[cpu].asn_lock = 0;
 }

 #ifdef CONFIG_KEXEC
diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c
index 8f752c604db0590c9b0ae1c54620676f2c7275e5..b66608c4934bbcbe92560cc485589ca02fbea5e3 100644
--- a/arch/sw_64/kernel/smp.c
+++ b/arch/sw_64/kernel/smp.c
@@ -499,8 +499,6 @@ void flush_tlb_all(void)
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }

-#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
-
 static void ipi_flush_tlb_mm(void *x)
 {
	struct mm_struct *mm = (struct mm_struct *) x;
diff --git a/arch/sw_64/mm/fault.c b/arch/sw_64/mm/fault.c
index 126752771b11f5d3ce368abd9f8821a6b8a8f4c9..574fe7930aacd97d3830d6e3347770f9dab4eac6 100644
--- a/arch/sw_64/mm/fault.c
+++ b/arch/sw_64/mm/fault.c
@@ -61,22 +61,6 @@ void show_all_vma(void)
	}
 }

-/*
- * Force a new ASN for a task.
- */
-void __load_new_mm_context(struct mm_struct *next_mm)
-{
-	unsigned long mmc, asn, ptbr;
-
-	mmc = __get_new_mm_context(next_mm, smp_processor_id());
-	next_mm->context.asid[smp_processor_id()] = mmc;
-
-	asn = mmc & HARDWARE_ASN_MASK;
-	ptbr = virt_to_pfn(next_mm->pgd);
-
-	load_asn_ptbr(asn, ptbr);
-}
-
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to handle_mm_fault().
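With __load_new_mm_context() removed from fault.c, the forced-reload logic now lives in flush_tlb_current(). __get_new_mm_context() itself is untouched by this patch; for reference, the Alpha function that sw_64's version descends from looks roughly like the sketch below. The TLB-flush primitive differs per architecture, so local_flush_tlb_all() here is a stand-in, not the real sw_64 call:

static inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
	unsigned long asn = cpu_last_asn(cpu);
	unsigned long next = asn + 1;

	/* Hardware ASNs exhausted: flush the TLB, start a new generation. */
	if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
		local_flush_tlb_all();	/* stand-in for the arch primitive */
		next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
	}
	cpu_last_asn(cpu) = next;
	return next;
}

Because the generation bits of next no longer match any previously handed-out asid, every mm is forced through the allocation path on its next switch_mm_irqs_off().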