From c640a76516ba7d279ab3cf8f795a196bccc04e8c Mon Sep 17 00:00:00 2001
From: He Sheng
Date: Thu, 8 Sep 2022 14:39:55 +0800
Subject: [PATCH] sw64: rename ASN to ASID

Sunway inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I56OLG

--------------------------------

ASID is a more common name than ASN, so rename ASN to ASID and adjust
the related macros accordingly.

Signed-off-by: He Sheng
Reviewed-by: Cui Wei
Signed-off-by: Gu Zitao
---
 arch/sw_64/include/asm/hmcall.h      |  4 +-
 arch/sw_64/include/asm/hw_init.h     |  2 +-
 arch/sw_64/include/asm/mmu_context.h | 66 +++++++++++-----------------
 arch/sw_64/include/asm/tlbflush.h    | 10 ++---
 arch/sw_64/kernel/setup.c            |  2 +-
 5 files changed, 35 insertions(+), 49 deletions(-)

diff --git a/arch/sw_64/include/asm/hmcall.h b/arch/sw_64/include/asm/hmcall.h
index 71d203efc587..5255d91e41a6 100644
--- a/arch/sw_64/include/asm/hmcall.h
+++ b/arch/sw_64/include/asm/hmcall.h
@@ -17,7 +17,7 @@
 #define HMC_wrksp	0x0E
 #define HMC_mtinten	0x0F
 #define HMC_load_mm	0x11
-#define HMC_tbisasn	0x14
+#define HMC_tbisasid	0x14
 #define HMC_tbivpn	0x19
 #define HMC_ret	0x1A
 #define HMC_wrvpcr	0x29
@@ -167,7 +167,7 @@ __CALL_HMC_R0(whami, unsigned long);
 __CALL_HMC_RW1(rdio64, unsigned long, unsigned long);
 __CALL_HMC_RW1(rdio32, unsigned int, unsigned long);
 __CALL_HMC_W2(wrent, void*, unsigned long);
-__CALL_HMC_W2(tbisasn, unsigned long, unsigned long);
+__CALL_HMC_W2(tbisasid, unsigned long, unsigned long);
 __CALL_HMC_W1(wrkgp, unsigned long);
 __CALL_HMC_RW2(wrperfmon, unsigned long, unsigned long, unsigned long);
 __CALL_HMC_RW3(sendii, unsigned long, unsigned long, unsigned long, unsigned long);
diff --git a/arch/sw_64/include/asm/hw_init.h b/arch/sw_64/include/asm/hw_init.h
index 81dd2581e0da..1fd7ed18c3f0 100644
--- a/arch/sw_64/include/asm/hw_init.h
+++ b/arch/sw_64/include/asm/hw_init.h
@@ -18,7 +18,7 @@ struct cache_desc {
 };
 
 struct cpuinfo_sw64 {
-	unsigned long last_asn;
+	unsigned long last_asid;
 	unsigned long ipi_count;
 	struct cache_desc icache;	/* Primary I-cache */
 	struct cache_desc dcache;	/* Primary D or combined I/D cache */
diff --git a/arch/sw_64/include/asm/mmu_context.h b/arch/sw_64/include/asm/mmu_context.h
index 84e84048a3ba..452da240ce99 100644
--- a/arch/sw_64/include/asm/mmu_context.h
+++ b/arch/sw_64/include/asm/mmu_context.h
@@ -2,11 +2,6 @@
 #ifndef _ASM_SW64_MMU_CONTEXT_H
 #define _ASM_SW64_MMU_CONTEXT_H
 
-/*
- * get a new mmu context..
- *
- * Copyright (C) 1996, Linus Torvalds
- */
 #include 
 #include 
 
@@ -17,53 +12,44 @@
  * table pointer(CSR:PTBR) or when we update the ASID.
  *
  */
-#define load_asn_ptbr	load_mm
+#define load_asid_ptbr	load_mm
 
 /*
- * The maximum ASN's the processor supports. ASN is called ASID too.
+ * The maximum ASID's the processor supports.
  */
 #ifdef CONFIG_SUBARCH_C3B
-#define WIDTH_HARDWARE_ASN	10
+#define ASID_BITS	10
 #endif
 
-/*
- * cpu_last_asn(processor):
- * 63                                            0
- * +-------------+----------------+--------------+
- * | asn version | this processor | hardware asn |
- * +-------------+----------------+--------------+
- */
-
 #include 
 
-#define cpu_last_asn(cpuid)	(cpu_data[cpuid].last_asn)
+#define last_asid(cpu)	(cpu_data[cpu].last_asid)
 
-#define ASN_FIRST_VERSION	(1UL << WIDTH_HARDWARE_ASN)
-#define HARDWARE_ASN_MASK	((1UL << WIDTH_HARDWARE_ASN) - 1)
+#define ASID_FIRST_VERSION	(1UL << ASID_BITS)
+#define ASID_MASK	((1UL << ASID_BITS) - 1)
 
 /*
- * NOTE! The way this is set up, the high bits of the "asn_cache" (and
- * the "mm->context") are the ASN _version_ code.  A version of 0 is
- * always considered invalid, so to invalidate another process you only
- * need to do "p->mm->context = 0".
+ * NOTE! The way this is set up, the high bits of the "last_asid" (and
+ * the "mm->context.asid[cpu]") are the ASID _version_ code.  A version
+ * of 0 is always considered invalid, so to invalidate another process
+ * you only need to do "p->mm->context.asid[cpu] = 0".
  *
- * If we need more ASN's than the processor has, we invalidate the old
- * user TLB's (tbivp()) and start a new ASN version. That will automatically
- * force a new asn for any other processes the next time they want to
- * run.
+ * If we need more ASID's than the processor has, we invalidate the old
+ * user TLB's (tbivp()) and start a new ASID version. That will force a
+ * new asid for any other processes the next time they want to run.
 */
 
 static inline unsigned long
 __get_new_mm_context(struct mm_struct *mm, long cpu)
 {
-	unsigned long asn = cpu_last_asn(cpu);
-	unsigned long next = asn + 1;
+	unsigned long asid = last_asid(cpu);
+	unsigned long next = asid + 1;
 
-	if ((asn & HARDWARE_ASN_MASK) >= HARDWARE_ASN_MASK) {
+	if ((asid & ASID_MASK) >= ASID_MASK) {
 		tbivp();
-		next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
+		next = (asid & ~ASID_MASK) + ASID_FIRST_VERSION;
 	}
-	cpu_last_asn(cpu) = next;
+	last_asid(cpu) = next;
 	return next;
 }
 
@@ -71,26 +57,26 @@ static inline void
 switch_mm_irqs_off(struct mm_struct *prev_mm, struct mm_struct *next_mm,
 		   struct task_struct *next)
 {
-	/* Check if our ASN is of an older version, and thus invalid. */
-	unsigned long asn, mmc, ptbr;
+	/* Check if our ASID is of an older version, and thus invalid. */
+	unsigned long asid, mmc, ptbr;
 	long cpu = smp_processor_id();
 
-	asn = cpu_last_asn(cpu);
+	asid = last_asid(cpu);
 	mmc = next_mm->context.asid[cpu];
-	if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
-		/* Check if mmc and cpu asn is in the same version */
+	if ((mmc ^ asid) & ~ASID_MASK) {
+		/* Check if mmc and cpu asid is in the same version */
 		mmc = __get_new_mm_context(next_mm, cpu);
 		next_mm->context.asid[cpu] = mmc;
 	}
 
 	/*
 	 * Update CSR:UPN and CSR:PTBR. Another thread may have allocated
-	 * a new mm->context[asid] (via flush_tlb_mm) without the ASN serial
+	 * a new mm->context[asid] (via flush_tlb_mm) without the ASID serial
 	 * number wrapping. We have no way to detect when this is needed.
 	 */
-	asn = mmc & HARDWARE_ASN_MASK;
+	asid = mmc & ASID_MASK;
 	ptbr = virt_to_pfn(next_mm->pgd);
-	load_asn_ptbr(asn, ptbr);
+	load_asid_ptbr(asid, ptbr);
 }
 
 #define switch_mm_irqs_off switch_mm_irqs_off
diff --git a/arch/sw_64/include/asm/tlbflush.h b/arch/sw_64/include/asm/tlbflush.h
index b35af83e6ec2..f92a93cfe3db 100644
--- a/arch/sw_64/include/asm/tlbflush.h
+++ b/arch/sw_64/include/asm/tlbflush.h
@@ -12,7 +12,7 @@
 
 static inline void flush_tlb_current(struct mm_struct *mm)
 {
-	unsigned long mmc, asn, ptbr, flags;
+	unsigned long mmc, asid, ptbr, flags;
 
 	local_irq_save(flags);
 
@@ -20,12 +20,12 @@ static inline void flush_tlb_current(struct mm_struct *mm)
 	mm->context.asid[smp_processor_id()] = mmc;
 
 	/*
-	 * Force a new ASN for a task. Note that there is no way to
-	 * write UPN only now, so call load_asn_ptbr here.
+	 * Force a new ASID for a task. Note that there is no way to
+	 * write UPN only now, so call load_asid_ptbr here.
 	 */
-	asn = mmc & HARDWARE_ASN_MASK;
+	asid = mmc & ASID_MASK;
 	ptbr = virt_to_pfn(mm->pgd);
-	load_asn_ptbr(asn, ptbr);
+	load_asid_ptbr(asid, ptbr);
 
 	local_irq_restore(flags);
 }
diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c
index f68d93b5a7b7..d4c97741616f 100644
--- a/arch/sw_64/kernel/setup.c
+++ b/arch/sw_64/kernel/setup.c
@@ -143,7 +143,7 @@ EXPORT_SYMBOL(screen_info);
  */
 void store_cpu_data(int cpu)
 {
-	cpu_data[cpu].last_asn = ASN_FIRST_VERSION;
+	cpu_data[cpu].last_asid = ASID_FIRST_VERSION;
 }
 
 #ifdef CONFIG_KEXEC
--
GitLab
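
As an aside for readers unfamiliar with the scheme the renamed code implements,
the following user-space sketch mimics the ASID versioning logic of
__get_new_mm_context() above: the low ASID_BITS bits hold the hardware ASID,
the high bits hold a generation ("version") number, and when the hardware ASIDs
run out the user TLB is flushed and the version is bumped. This is only an
illustration under those assumptions; the DEMO_* macros, the demo_* helpers and
the printf() stand-in for tbivp() are hypothetical names, and the real kernel
keeps this state per CPU.

#include <stdio.h>

#define DEMO_ASID_BITS		10
#define DEMO_ASID_FIRST_VERSION	(1UL << DEMO_ASID_BITS)
#define DEMO_ASID_MASK		((1UL << DEMO_ASID_BITS) - 1)

/* Per-CPU in the kernel; a single global is enough for the demo. */
static unsigned long demo_last_asid = DEMO_ASID_FIRST_VERSION;

/* Stand-in for tbivp(): pretend to drop all user TLB entries. */
static void demo_flush_user_tlb(void)
{
	printf("  user TLB flushed\n");
}

/* Mirrors the logic of __get_new_mm_context() for one CPU. */
static unsigned long demo_get_new_context(void)
{
	unsigned long asid = demo_last_asid;
	unsigned long next = asid + 1;

	if ((asid & DEMO_ASID_MASK) >= DEMO_ASID_MASK) {
		/* Hardware ASIDs exhausted: flush, then start a new version. */
		demo_flush_user_tlb();
		next = (asid & ~DEMO_ASID_MASK) + DEMO_ASID_FIRST_VERSION;
	}
	demo_last_asid = next;
	return next;
}

int main(void)
{
	/* Allocate a few more contexts than one version can hold. */
	for (unsigned long i = 0; i < DEMO_ASID_MASK + 3; i++) {
		unsigned long mmc = demo_get_new_context();

		if (i < 2 || i > DEMO_ASID_MASK - 2)
			printf("context %#07lx: version %lu, hw asid %lu\n",
			       mmc, mmc >> DEMO_ASID_BITS,
			       mmc & DEMO_ASID_MASK);
	}
	return 0;
}

Note how a context value never repeats within a version, so a stale
mm->context.asid[cpu] left over from an older version can be detected by
comparing the high bits, which is exactly what switch_mm_irqs_off() does before
deciding whether to allocate a fresh ASID.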