未验证 提交 a3182c91 编写于 作者: A Anup Patel 提交者: Palmer Dabbelt

RISC-V: Access CSRs using CSR numbers

We should prefer accessing CSRs using their CSR numbers because:
1. It compiles fine with older toolchains.
2. We can use latest CSR names in #define macro names of CSR numbers
   as-per RISC-V spec.
3. We can access newly added CSRs even if toolchain does not recognize
   newly added CSRs by name.
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
上级 6dcaf004
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#ifndef _ASM_RISCV_CSR_H #ifndef _ASM_RISCV_CSR_H
#define _ASM_RISCV_CSR_H #define _ASM_RISCV_CSR_H
#include <asm/asm.h>
#include <linux/const.h> #include <linux/const.h>
/* Status register flags */ /* Status register flags */
...@@ -79,12 +80,29 @@ ...@@ -79,12 +80,29 @@
#define SIE_STIE (_AC(0x1, UL) << IRQ_S_TIMER) #define SIE_STIE (_AC(0x1, UL) << IRQ_S_TIMER)
#define SIE_SEIE (_AC(0x1, UL) << IRQ_S_EXT) #define SIE_SEIE (_AC(0x1, UL) << IRQ_S_EXT)
#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01
#define CSR_INSTRET 0xc02
#define CSR_SSTATUS 0x100
#define CSR_SIE 0x104
#define CSR_STVEC 0x105
#define CSR_SCOUNTEREN 0x106
#define CSR_SSCRATCH 0x140
#define CSR_SEPC 0x141
#define CSR_SCAUSE 0x142
#define CSR_STVAL 0x143
#define CSR_SIP 0x144
#define CSR_SATP 0x180
#define CSR_CYCLEH 0xc80
#define CSR_TIMEH 0xc81
#define CSR_INSTRETH 0xc82
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#define csr_swap(csr, val) \ #define csr_swap(csr, val) \
({ \ ({ \
unsigned long __v = (unsigned long)(val); \ unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrrw %0, " #csr ", %1" \ __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \ : "=r" (__v) : "rK" (__v) \
: "memory"); \ : "memory"); \
__v; \ __v; \
...@@ -93,7 +111,7 @@ ...@@ -93,7 +111,7 @@
#define csr_read(csr) \ #define csr_read(csr) \
({ \ ({ \
register unsigned long __v; \ register unsigned long __v; \
__asm__ __volatile__ ("csrr %0, " #csr \ __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr) \
: "=r" (__v) : \ : "=r" (__v) : \
: "memory"); \ : "memory"); \
__v; \ __v; \
...@@ -102,7 +120,7 @@ ...@@ -102,7 +120,7 @@
#define csr_write(csr, val) \ #define csr_write(csr, val) \
({ \ ({ \
unsigned long __v = (unsigned long)(val); \ unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrw " #csr ", %0" \ __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \ : : "rK" (__v) \
: "memory"); \ : "memory"); \
}) })
...@@ -110,7 +128,7 @@ ...@@ -110,7 +128,7 @@
#define csr_read_set(csr, val) \ #define csr_read_set(csr, val) \
({ \ ({ \
unsigned long __v = (unsigned long)(val); \ unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrrs %0, " #csr ", %1" \ __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \ : "=r" (__v) : "rK" (__v) \
: "memory"); \ : "memory"); \
__v; \ __v; \
...@@ -119,7 +137,7 @@ ...@@ -119,7 +137,7 @@
#define csr_set(csr, val) \ #define csr_set(csr, val) \
({ \ ({ \
unsigned long __v = (unsigned long)(val); \ unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrs " #csr ", %0" \ __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \ : : "rK" (__v) \
: "memory"); \ : "memory"); \
}) })
...@@ -127,7 +145,7 @@ ...@@ -127,7 +145,7 @@
#define csr_read_clear(csr, val) \ #define csr_read_clear(csr, val) \
({ \ ({ \
unsigned long __v = (unsigned long)(val); \ unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrrc %0, " #csr ", %1" \ __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
: "=r" (__v) : "rK" (__v) \ : "=r" (__v) : "rK" (__v) \
: "memory"); \ : "memory"); \
__v; \ __v; \
...@@ -136,7 +154,7 @@ ...@@ -136,7 +154,7 @@
#define csr_clear(csr, val) \ #define csr_clear(csr, val) \
({ \ ({ \
unsigned long __v = (unsigned long)(val); \ unsigned long __v = (unsigned long)(val); \
__asm__ __volatile__ ("csrc " #csr ", %0" \ __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0" \
: : "rK" (__v) \ : : "rK" (__v) \
: "memory"); \ : "memory"); \
}) })
......
...@@ -21,25 +21,25 @@ ...@@ -21,25 +21,25 @@
/* read interrupt enabled status */ /* read interrupt enabled status */
static inline unsigned long arch_local_save_flags(void) static inline unsigned long arch_local_save_flags(void)
{ {
return csr_read(sstatus); return csr_read(CSR_SSTATUS);
} }
/* unconditionally enable interrupts */ /* unconditionally enable interrupts */
static inline void arch_local_irq_enable(void) static inline void arch_local_irq_enable(void)
{ {
csr_set(sstatus, SR_SIE); csr_set(CSR_SSTATUS, SR_SIE);
} }
/* unconditionally disable interrupts */ /* unconditionally disable interrupts */
static inline void arch_local_irq_disable(void) static inline void arch_local_irq_disable(void)
{ {
csr_clear(sstatus, SR_SIE); csr_clear(CSR_SSTATUS, SR_SIE);
} }
/* get status and disable interrupts */ /* get status and disable interrupts */
static inline unsigned long arch_local_irq_save(void) static inline unsigned long arch_local_irq_save(void)
{ {
return csr_read_clear(sstatus, SR_SIE); return csr_read_clear(CSR_SSTATUS, SR_SIE);
} }
/* test flags */ /* test flags */
...@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void) ...@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
/* set interrupt enabled status */ /* set interrupt enabled status */
static inline void arch_local_irq_restore(unsigned long flags) static inline void arch_local_irq_restore(unsigned long flags)
{ {
csr_set(sstatus, flags & SR_SIE); csr_set(CSR_SSTATUS, flags & SR_SIE);
} }
#endif /* _ASM_RISCV_IRQFLAGS_H */ #endif /* _ASM_RISCV_IRQFLAGS_H */
...@@ -83,12 +83,7 @@ static inline void switch_mm(struct mm_struct *prev, ...@@ -83,12 +83,7 @@ static inline void switch_mm(struct mm_struct *prev,
cpumask_clear_cpu(cpu, mm_cpumask(prev)); cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next)); cpumask_set_cpu(cpu, mm_cpumask(next));
/* csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
* Use the old spbtr name instead of using the current satp
* name to support binutils 2.29 which doesn't know about the
* privileged ISA 1.10 yet.
*/
csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
local_flush_tlb_all(); local_flush_tlb_all();
flush_icache_deferred(next); flush_icache_deferred(next);
......
...@@ -37,11 +37,11 @@ ...@@ -37,11 +37,11 @@
* the kernel thread pointer. If we came from the kernel, sscratch * the kernel thread pointer. If we came from the kernel, sscratch
* will contain 0, and we should continue on the current TP. * will contain 0, and we should continue on the current TP.
*/ */
csrrw tp, sscratch, tp csrrw tp, CSR_SSCRATCH, tp
bnez tp, _save_context bnez tp, _save_context
_restore_kernel_tpsp: _restore_kernel_tpsp:
csrr tp, sscratch csrr tp, CSR_SSCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp) REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context: _save_context:
REG_S sp, TASK_TI_USER_SP(tp) REG_S sp, TASK_TI_USER_SP(tp)
...@@ -87,11 +87,11 @@ _save_context: ...@@ -87,11 +87,11 @@ _save_context:
li t0, SR_SUM | SR_FS li t0, SR_SUM | SR_FS
REG_L s0, TASK_TI_USER_SP(tp) REG_L s0, TASK_TI_USER_SP(tp)
csrrc s1, sstatus, t0 csrrc s1, CSR_SSTATUS, t0
csrr s2, sepc csrr s2, CSR_SEPC
csrr s3, sbadaddr csrr s3, CSR_STVAL
csrr s4, scause csrr s4, CSR_SCAUSE
csrr s5, sscratch csrr s5, CSR_SSCRATCH
REG_S s0, PT_SP(sp) REG_S s0, PT_SP(sp)
REG_S s1, PT_SSTATUS(sp) REG_S s1, PT_SSTATUS(sp)
REG_S s2, PT_SEPC(sp) REG_S s2, PT_SEPC(sp)
...@@ -107,8 +107,8 @@ _save_context: ...@@ -107,8 +107,8 @@ _save_context:
.macro RESTORE_ALL .macro RESTORE_ALL
REG_L a0, PT_SSTATUS(sp) REG_L a0, PT_SSTATUS(sp)
REG_L a2, PT_SEPC(sp) REG_L a2, PT_SEPC(sp)
csrw sstatus, a0 csrw CSR_SSTATUS, a0
csrw sepc, a2 csrw CSR_SEPC, a2
REG_L x1, PT_RA(sp) REG_L x1, PT_RA(sp)
REG_L x3, PT_GP(sp) REG_L x3, PT_GP(sp)
...@@ -155,7 +155,7 @@ ENTRY(handle_exception) ...@@ -155,7 +155,7 @@ ENTRY(handle_exception)
* Set sscratch register to 0, so that if a recursive exception * Set sscratch register to 0, so that if a recursive exception
* occurs, the exception vector knows it came from the kernel * occurs, the exception vector knows it came from the kernel
*/ */
csrw sscratch, x0 csrw CSR_SSCRATCH, x0
/* Load the global pointer */ /* Load the global pointer */
.option push .option push
...@@ -248,7 +248,7 @@ resume_userspace: ...@@ -248,7 +248,7 @@ resume_userspace:
* Save TP into sscratch, so we can find the kernel data structures * Save TP into sscratch, so we can find the kernel data structures
* again. * again.
*/ */
csrw sscratch, tp csrw CSR_SSCRATCH, tp
restore_all: restore_all:
RESTORE_ALL RESTORE_ALL
......
...@@ -22,9 +22,9 @@ ...@@ -22,9 +22,9 @@
__INIT __INIT
ENTRY(_start) ENTRY(_start)
/* Mask and clear all interrupts */ /* Mask all interrupts */
csrw sie, zero csrw CSR_SIE, zero
csrw sip, zero csrw CSR_SIP, zero
/* Load the global pointer */ /* Load the global pointer */
.option push .option push
...@@ -86,7 +86,7 @@ relocate: ...@@ -86,7 +86,7 @@ relocate:
/* Point stvec to virtual address of intruction after satp write */ /* Point stvec to virtual address of intruction after satp write */
la a0, 1f la a0, 1f
add a0, a0, a1 add a0, a0, a1
csrw stvec, a0 csrw CSR_STVEC, a0
/* Compute satp for kernel page tables, but don't load it yet */ /* Compute satp for kernel page tables, but don't load it yet */
la a2, swapper_pg_dir la a2, swapper_pg_dir
...@@ -102,12 +102,12 @@ relocate: ...@@ -102,12 +102,12 @@ relocate:
srl a0, a0, PAGE_SHIFT srl a0, a0, PAGE_SHIFT
or a0, a0, a1 or a0, a0, a1
sfence.vma sfence.vma
csrw sptbr, a0 csrw CSR_SATP, a0
.align 2 .align 2
1: 1:
/* Set trap vector to spin forever to help debug */ /* Set trap vector to spin forever to help debug */
la a0, .Lsecondary_park la a0, .Lsecondary_park
csrw stvec, a0 csrw CSR_STVEC, a0
/* Reload the global pointer */ /* Reload the global pointer */
.option push .option push
...@@ -116,7 +116,7 @@ relocate: ...@@ -116,7 +116,7 @@ relocate:
.option pop .option pop
/* Switch to kernel page tables */ /* Switch to kernel page tables */
csrw sptbr, a2 csrw CSR_SATP, a2
ret ret
...@@ -127,7 +127,7 @@ relocate: ...@@ -127,7 +127,7 @@ relocate:
/* Set trap vector to spin forever to help debug */ /* Set trap vector to spin forever to help debug */
la a3, .Lsecondary_park la a3, .Lsecondary_park
csrw stvec, a3 csrw CSR_STVEC, a3
slli a3, a0, LGREG slli a3, a0, LGREG
la a1, __cpu_up_stack_pointer la a1, __cpu_up_stack_pointer
......
...@@ -185,10 +185,10 @@ static inline u64 read_counter(int idx) ...@@ -185,10 +185,10 @@ static inline u64 read_counter(int idx)
switch (idx) { switch (idx) {
case RISCV_PMU_CYCLE: case RISCV_PMU_CYCLE:
val = csr_read(cycle); val = csr_read(CSR_CYCLE);
break; break;
case RISCV_PMU_INSTRET: case RISCV_PMU_INSTRET:
val = csr_read(instret); val = csr_read(CSR_INSTRET);
break; break;
default: default:
WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS); WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
......
...@@ -95,7 +95,7 @@ void riscv_software_interrupt(void) ...@@ -95,7 +95,7 @@ void riscv_software_interrupt(void)
unsigned long *stats = ipi_data[smp_processor_id()].stats; unsigned long *stats = ipi_data[smp_processor_id()].stats;
/* Clear pending IPI */ /* Clear pending IPI */
csr_clear(sip, SIE_SSIE); csr_clear(CSR_SIP, SIE_SSIE);
while (true) { while (true) {
unsigned long ops; unsigned long ops;
......
...@@ -159,9 +159,9 @@ void __init trap_init(void) ...@@ -159,9 +159,9 @@ void __init trap_init(void)
* Set sup0 scratch register to 0, indicating to exception vector * Set sup0 scratch register to 0, indicating to exception vector
* that we are presently executing in the kernel * that we are presently executing in the kernel
*/ */
csr_write(sscratch, 0); csr_write(CSR_SSCRATCH, 0);
/* Set the exception vector address */ /* Set the exception vector address */
csr_write(stvec, &handle_exception); csr_write(CSR_STVEC, &handle_exception);
/* Enable all interrupts */ /* Enable all interrupts */
csr_write(sie, -1); csr_write(CSR_SIE, -1);
} }
...@@ -239,13 +239,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs) ...@@ -239,13 +239,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
* Do _not_ use "tsk->active_mm->pgd" here. * Do _not_ use "tsk->active_mm->pgd" here.
* We might be inside an interrupt in the middle * We might be inside an interrupt in the middle
* of a task switch. * of a task switch.
*
* Note: Use the old spbtr name instead of using the current
* satp name to support binutils 2.29 which doesn't know about
* the privileged ISA 1.10 yet.
*/ */
index = pgd_index(addr); index = pgd_index(addr);
pgd = (pgd_t *)pfn_to_virt(csr_read(sptbr)) + index; pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
pgd_k = init_mm.pgd + index; pgd_k = init_mm.pgd + index;
if (!pgd_present(*pgd_k)) if (!pgd_present(*pgd_k))
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册