Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
下南居士
rt-thread
提交
ff15433c
R
rt-thread
项目概览
下南居士
/
rt-thread
与 Fork 源项目一致
Fork自
RT-Thread / rt-thread
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
R
rt-thread
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
前往新版Gitcode,体验更适合开发者的 AI 搜索 >>
提交
ff15433c
编写于
10月 05, 2022
作者:
W
Wayne Lin
提交者:
Bernard Xiong
10月 11, 2022
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Remove aarch64 CPU driver.
上级
d0b70ab7
变更
20
隐藏空白更改
内联
并排
Showing
20 changed file
with
0 addition
and
2758 deletion
+0
-2758
bsp/nuvoton/libraries/ma35/libcpu/aarch64/SConscript
bsp/nuvoton/libraries/ma35/libcpu/aarch64/SConscript
+0
-13
bsp/nuvoton/libraries/ma35/libcpu/aarch64/armv8.h
bsp/nuvoton/libraries/ma35/libcpu/aarch64/armv8.h
+0
-63
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cache.S
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cache.S
+0
-151
bsp/nuvoton/libraries/ma35/libcpu/aarch64/context_gcc.S
bsp/nuvoton/libraries/ma35/libcpu/aarch64/context_gcc.S
+0
-312
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cp15.h
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cp15.h
+0
-68
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cpu.c
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cpu.c
+0
-108
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cpu_gcc.S
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cpu_gcc.S
+0
-82
bsp/nuvoton/libraries/ma35/libcpu/aarch64/entry_point.S
bsp/nuvoton/libraries/ma35/libcpu/aarch64/entry_point.S
+0
-212
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gic.c
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gic.c
+0
-491
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gic.h
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gic.h
+0
-62
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gtimer.S
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gtimer.S
+0
-44
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gtimer.h
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gtimer.h
+0
-27
bsp/nuvoton/libraries/ma35/libcpu/aarch64/interrupt.c
bsp/nuvoton/libraries/ma35/libcpu/aarch64/interrupt.c
+0
-285
bsp/nuvoton/libraries/ma35/libcpu/aarch64/interrupt.h
bsp/nuvoton/libraries/ma35/libcpu/aarch64/interrupt.h
+0
-55
bsp/nuvoton/libraries/ma35/libcpu/aarch64/mmu.c
bsp/nuvoton/libraries/ma35/libcpu/aarch64/mmu.c
+0
-371
bsp/nuvoton/libraries/ma35/libcpu/aarch64/mmu.h
bsp/nuvoton/libraries/ma35/libcpu/aarch64/mmu.h
+0
-78
bsp/nuvoton/libraries/ma35/libcpu/aarch64/secondary_cpu.c
bsp/nuvoton/libraries/ma35/libcpu/aarch64/secondary_cpu.c
+0
-71
bsp/nuvoton/libraries/ma35/libcpu/aarch64/stack.c
bsp/nuvoton/libraries/ma35/libcpu/aarch64/stack.c
+0
-90
bsp/nuvoton/libraries/ma35/libcpu/aarch64/trap.c
bsp/nuvoton/libraries/ma35/libcpu/aarch64/trap.c
+0
-114
bsp/nuvoton/libraries/ma35/libcpu/aarch64/vector_gcc.S
bsp/nuvoton/libraries/ma35/libcpu/aarch64/vector_gcc.S
+0
-61
未找到文件。
bsp/nuvoton/libraries/ma35/libcpu/aarch64/SConscript
已删除
100644 → 0
浏览文件 @
d0b70ab7
# RT-Thread building script for component
from
building
import
*
Import
(
'rtconfig'
)
cwd
=
GetCurrentDir
()
src
=
Glob
(
'*.c'
)
+
Glob
(
'*.cpp'
)
+
Glob
(
'*.S'
)
CPPPATH
=
[
cwd
]
group
=
DefineGroup
(
'CPU'
,
src
,
depend
=
[
''
],
CPPPATH
=
CPPPATH
)
Return
(
'group'
)
bsp/nuvoton/libraries/ma35/libcpu/aarch64/armv8.h
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-15 Bernard first version
*/
#ifndef __ARMV8_H__
#define __ARMV8_H__
/* the exception stack without VFP registers */
struct
rt_hw_exp_stack
{
unsigned
long
long
pc
;
unsigned
long
long
spsr
;
unsigned
long
long
x30
;
unsigned
long
long
xz
;
unsigned
long
long
x28
;
unsigned
long
long
x29
;
unsigned
long
long
x26
;
unsigned
long
long
x27
;
unsigned
long
long
x24
;
unsigned
long
long
x25
;
unsigned
long
long
x22
;
unsigned
long
long
x23
;
unsigned
long
long
x20
;
unsigned
long
long
x21
;
unsigned
long
long
x18
;
unsigned
long
long
x19
;
unsigned
long
long
x16
;
unsigned
long
long
x17
;
unsigned
long
long
x14
;
unsigned
long
long
x15
;
unsigned
long
long
x12
;
unsigned
long
long
x13
;
unsigned
long
long
x10
;
unsigned
long
long
x11
;
unsigned
long
long
x8
;
unsigned
long
long
x9
;
unsigned
long
long
x6
;
unsigned
long
long
x7
;
unsigned
long
long
x4
;
unsigned
long
long
x5
;
unsigned
long
long
x2
;
unsigned
long
long
x3
;
unsigned
long
long
x0
;
unsigned
long
long
x1
;
};
#define SP_ELx ( ( unsigned long long ) 0x01 )
#define SP_EL0 ( ( unsigned long long ) 0x00 )
#define PSTATE_EL1 ( ( unsigned long long ) 0x04 )
#define PSTATE_EL2 ( ( unsigned long long ) 0x08 )
#define PSTATE_EL3 ( ( unsigned long long ) 0x0c )
rt_ubase_t
rt_hw_get_current_el
(
void
);
void
rt_hw_set_elx_env
(
void
);
void
rt_hw_set_current_vbar
(
rt_ubase_t
addr
);
#endif
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cache.S
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
*
Copyright
(
c
)
2006
-
2020
,
RT
-
Thread
Development
Team
*
*
SPDX
-
License
-
Identifier
:
Apache
-
2
.0
*
*
Change
Logs
:
*
Date
Author
Notes
*
2020
-
03
-
17
bigmagic
first
version
*/
/*
*
void
__asm_dcache_level
(
level
)
*
*
flush
or
invalidate
one
level
cache
.
*
*
x0
:
cache
level
*
x1
:
0
clean
&
invalidate
,
1
invalidate
only
*
x2
~
x9
:
clobbered
*/
.
globl
__asm_dcache_level
__asm_dcache_level
:
lsl
x12
,
x0
,
#
1
msr
csselr_el1
,
x12
/*
select
cache
level
*/
isb
/*
sync
change
of
cssidr_el1
*/
mrs
x6
,
ccsidr_el1
/*
read
the
new
cssidr_el1
*/
and
x2
,
x6
,
#
7
/*
x2
<-
log2
(
cache
line
size
)-
4
*/
add
x2
,
x2
,
#
4
/*
x2
<-
log2
(
cache
line
size
)
*/
mov
x3
,
#
0x3ff
and
x3
,
x3
,
x6
,
lsr
#
3
/*
x3
<-
max
number
of
#
ways
*/
clz
w5
,
w3
/*
bit
position
of
#
ways
*/
mov
x4
,
#
0x7fff
and
x4
,
x4
,
x6
,
lsr
#
13
/*
x4
<-
max
number
of
#
sets
*/
/
*
x12
<-
cache
level
<<
1
*/
/
*
x2
<-
line
length
offset
*/
/
*
x3
<-
number
of
cache
ways
-
1
*/
/
*
x4
<-
number
of
cache
sets
-
1
*/
/
*
x5
<-
bit
position
of
#
ways
*/
loop_set
:
mov
x6
,
x3
/*
x6
<-
working
copy
of
#
ways
*/
loop_way
:
lsl
x7
,
x6
,
x5
orr
x9
,
x12
,
x7
/*
map
way
and
level
to
cisw
value
*/
lsl
x7
,
x4
,
x2
orr
x9
,
x9
,
x7
/*
map
set
number
to
cisw
value
*/
tbz
w1
,
#
0
,
1
f
dc
isw
,
x9
b
2
f
1
:
dc
cisw
,
x9
/*
clean
&
invalidate
by
set
/
way
*/
2
:
subs
x6
,
x6
,
#
1
/*
decrement
the
way
*/
b.ge
loop_way
subs
x4
,
x4
,
#
1
/*
decrement
the
set
*/
b.ge
loop_set
ret
/*
*
void
__asm_flush_dcache_all
(
int
invalidate_only
)
*
*
x0
:
0
clean
&
invalidate
,
1
invalidate
only
*
*
flush
or
invalidate
all
data
cache
by
SET
/
WAY
.
*/
.
globl
__asm_dcache_all
__asm_dcache_all
:
mov
x1
,
x0
dsb
sy
mrs
x10
,
clidr_el1
/*
read
clidr_el1
*/
lsr
x11
,
x10
,
#
24
and
x11
,
x11
,
#
0x7
/*
x11
<-
loc
*/
cbz
x11
,
finished
/*
if
loc
is
0
,
exit
*/
mov
x15
,
lr
mov
x0
,
#
0
/*
start
flush
at
cache
level
0
*/
/
*
x0
<-
cache
level
*/
/
*
x10
<-
clidr_el1
*/
/
*
x11
<-
loc
*/
/
*
x15
<-
return
address
*/
loop_level
:
lsl
x12
,
x0
,
#
1
add
x12
,
x12
,
x0
/*
x0
<-
tripled
cache
level
*/
lsr
x12
,
x10
,
x12
and
x12
,
x12
,
#
7
/*
x12
<-
cache
type
*/
cmp
x12
,
#
2
b.lt
skip
/*
skip
if
no
cache
or
icache
*/
bl
__asm_dcache_level
/*
x1
=
0
flush
,
1
invalidate
*/
skip
:
add
x0
,
x0
,
#
1
/*
increment
cache
level
*/
cmp
x11
,
x0
b.gt
loop_level
mov
x0
,
#
0
msr
csselr_el1
,
x0
/*
restore
csselr_el1
*/
dsb
sy
isb
mov
lr
,
x15
finished
:
ret
.
globl
__asm_flush_dcache_all
__asm_flush_dcache_all
:
mov
x0
,
#
0
b
__asm_dcache_all
.
globl
__asm_invalidate_dcache_all
__asm_invalidate_dcache_all
:
mov
x0
,
#
0x1
b
__asm_dcache_all
/*
*
void
__asm_flush_dcache_range
(
start
,
end
)
*
*
clean
&
invalidate
data
cache
in
the
range
*
*
x0
:
start
address
*
x1
:
end
address
*/
.
globl
__asm_flush_dcache_range
__asm_flush_dcache_range
:
mrs
x3
,
ctr_el0
lsr
x3
,
x3
,
#
16
and
x3
,
x3
,
#
0xf
mov
x2
,
#
4
lsl
x2
,
x2
,
x3
/*
cache
line
size
*/
/
*
x2
<-
minimal
cache
line
size
in
cache
system
*/
sub
x3
,
x2
,
#
1
bic
x0
,
x0
,
x3
1
:
dc
civac
,
x0
/*
clean
&
invalidate
data
or
unified
cache
*/
add
x0
,
x0
,
x2
cmp
x0
,
x1
b.lo
1
b
dsb
sy
ret
/*
*
void
__asm_invalidate_icache_all
(
void
)
*
*
invalidate
all
tlb
entries
.
*/
.
globl
__asm_invalidate_icache_all
__asm_invalidate_icache_all
:
ic
ialluis
isb
sy
ret
.
globl
__asm_flush_l3_cache
__asm_flush_l3_cache
:
mov
x0
,
#
0
/*
return
status
as
success
*/
ret
\ No newline at end of file
bsp/nuvoton/libraries/ma35/libcpu/aarch64/context_gcc.S
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
*
Copyright
(
c
)
2006
-
2020
,
RT
-
Thread
Development
Team
*
*
SPDX
-
License
-
Identifier
:
Apache
-
2
.0
*
*
Change
Logs
:
*
Date
Author
Notes
*
2018
-
10
-
06
ZhaoXiaowei
the
first
version
*/
.
macro
SAVE_CONTEXT
/
*
Switch
to
use
the
EL0
stack
pointer
.
*/
MSR
SPSEL
,
#
0
/
*
Save
the
entire
context
.
*/
STP
X0
,
X1
,
[
SP
,
#-
0x10
]!
STP
X2
,
X3
,
[
SP
,
#-
0x10
]!
STP
X4
,
X5
,
[
SP
,
#-
0x10
]!
STP
X6
,
X7
,
[
SP
,
#-
0x10
]!
STP
X8
,
X9
,
[
SP
,
#-
0x10
]!
STP
X10
,
X11
,
[
SP
,
#-
0x10
]!
STP
X12
,
X13
,
[
SP
,
#-
0x10
]!
STP
X14
,
X15
,
[
SP
,
#-
0x10
]!
STP
X16
,
X17
,
[
SP
,
#-
0x10
]!
STP
X18
,
X19
,
[
SP
,
#-
0x10
]!
STP
X20
,
X21
,
[
SP
,
#-
0x10
]!
STP
X22
,
X23
,
[
SP
,
#-
0x10
]!
STP
X24
,
X25
,
[
SP
,
#-
0x10
]!
STP
X26
,
X27
,
[
SP
,
#-
0x10
]!
STP
X28
,
X29
,
[
SP
,
#-
0x10
]!
STP
X30
,
XZR
,
[
SP
,
#-
0x10
]!
MRS
X0
,
CurrentEL
CMP
X0
,
0xc
B.EQ
3
f
CMP
X0
,
0x8
B.EQ
2
f
CMP
X0
,
0x4
B.EQ
1
f
B
.
3
:
MRS
X3
,
SPSR_EL3
/
*
Save
the
ELR
.
*/
MRS
X2
,
ELR_EL3
B
0
f
2
:
MRS
X3
,
SPSR_EL2
/
*
Save
the
ELR
.
*/
MRS
X2
,
ELR_EL2
B
0
f
1
:
MRS
X3
,
SPSR_EL1
MRS
X2
,
ELR_EL1
B
0
f
0
:
STP
X2
,
X3
,
[
SP
,
#-
0x10
]!
MOV
X0
,
SP
/*
Move
SP
into
X0
for
saving
.
*/
/
*
Switch
to
use
the
ELx
stack
pointer
.
*/
MSR
SPSEL
,
#
1
.
endm
.
macro
SAVE_CONTEXT_T
/
*
Switch
to
use
the
EL0
stack
pointer
.
*/
MSR
SPSEL
,
#
0
/
*
Save
the
entire
context
.
*/
STP
X0
,
X1
,
[
SP
,
#-
0x10
]!
STP
X2
,
X3
,
[
SP
,
#-
0x10
]!
STP
X4
,
X5
,
[
SP
,
#-
0x10
]!
STP
X6
,
X7
,
[
SP
,
#-
0x10
]!
STP
X8
,
X9
,
[
SP
,
#-
0x10
]!
STP
X10
,
X11
,
[
SP
,
#-
0x10
]!
STP
X12
,
X13
,
[
SP
,
#-
0x10
]!
STP
X14
,
X15
,
[
SP
,
#-
0x10
]!
STP
X16
,
X17
,
[
SP
,
#-
0x10
]!
STP
X18
,
X19
,
[
SP
,
#-
0x10
]!
STP
X20
,
X21
,
[
SP
,
#-
0x10
]!
STP
X22
,
X23
,
[
SP
,
#-
0x10
]!
STP
X24
,
X25
,
[
SP
,
#-
0x10
]!
STP
X26
,
X27
,
[
SP
,
#-
0x10
]!
STP
X28
,
X29
,
[
SP
,
#-
0x10
]!
STP
X30
,
XZR
,
[
SP
,
#-
0x10
]!
MRS
X0
,
CurrentEL
CMP
X0
,
0xc
B.EQ
3
f
CMP
X0
,
0x8
B.EQ
2
f
CMP
X0
,
0x4
B.EQ
1
f
B
.
3
:
MRS
X3
,
SPSR_EL3
MOV
X2
,
X30
B
0
f
2
:
MRS
X3
,
SPSR_EL2
MOV
X2
,
X30
B
0
f
1
:
MRS
X3
,
SPSR_EL1
MOV
X2
,
X30
B
0
f
0
:
STP
X2
,
X3
,
[
SP
,
#-
0x10
]!
MOV
X0
,
SP
/*
Move
SP
into
X0
for
saving
.
*/
/
*
Switch
to
use
the
ELx
stack
pointer
.
*/
MSR
SPSEL
,
#
1
.
endm
.
macro
RESTORE_CONTEXT
/
*
Switch
to
use
the
EL0
stack
pointer
.
*/
MSR
SPSEL
,
#
0
/
*
Set
the
SP
to
point
to
the
stack
of
the
task
being
restored
.
*/
MOV
SP
,
X0
LDP
X2
,
X3
,
[
SP
],
#
0x10
/*
SPSR
and
ELR
.
*/
MRS
X0
,
CurrentEL
CMP
X0
,
0xc
B.EQ
3
f
CMP
X0
,
0x8
B.EQ
2
f
CMP
X0
,
0x4
B.EQ
1
f
B
.
3
:
MSR
SPSR_EL3
,
X3
MSR
ELR_EL3
,
X2
B
0
f
2
:
MSR
SPSR_EL2
,
X3
MSR
ELR_EL2
,
X2
B
0
f
1
:
MSR
SPSR_EL1
,
X3
MSR
ELR_EL1
,
X2
B
0
f
0
:
LDP
X30
,
XZR
,
[
SP
],
#
0x10
LDP
X28
,
X29
,
[
SP
],
#
0x10
LDP
X26
,
X27
,
[
SP
],
#
0x10
LDP
X24
,
X25
,
[
SP
],
#
0x10
LDP
X22
,
X23
,
[
SP
],
#
0x10
LDP
X20
,
X21
,
[
SP
],
#
0x10
LDP
X18
,
X19
,
[
SP
],
#
0x10
LDP
X16
,
X17
,
[
SP
],
#
0x10
LDP
X14
,
X15
,
[
SP
],
#
0x10
LDP
X12
,
X13
,
[
SP
],
#
0x10
LDP
X10
,
X11
,
[
SP
],
#
0x10
LDP
X8
,
X9
,
[
SP
],
#
0x10
LDP
X6
,
X7
,
[
SP
],
#
0x10
LDP
X4
,
X5
,
[
SP
],
#
0x10
LDP
X2
,
X3
,
[
SP
],
#
0x10
LDP
X0
,
X1
,
[
SP
],
#
0x10
/
*
Switch
to
use
the
ELx
stack
pointer
.
_RB_
Might
not
be
required
.
*/
MSR
SPSEL
,
#
1
ERET
.
endm
.
text
/*
*
rt_base_t
rt_hw_interrupt_disable
()
;
*/
.
globl
rt_hw_interrupt_disable
rt_hw_interrupt_disable
:
MRS
X0
,
DAIF
MSR
DAIFSet
,
#
3
DSB
SY
RET
/*
*
void
rt_hw_interrupt_enable
(
rt_base_t
level
)
;
*/
.
globl
rt_hw_interrupt_enable
rt_hw_interrupt_enable
:
DSB
SY
MOV
X1
,
#
0xC0
ANDS
X0
,
X0
,
X1
B.NE
rt_hw_interrupt_enable_exit
MSR
DAIFClr
,
#
3
rt_hw_interrupt_enable_exit
:
RET
/*
*
void
rt_hw_context_switch_to
(
rt_ubase_t
to
)
;
*
r0
-->
to
*/
.
globl
rt_hw_context_switch_to
rt_hw_context_switch_to
:
LDR
X0
,
[
X0
]
RESTORE_CONTEXT
.
text
/*
*
void
rt_hw_context_switch
(
rt_ubase_t
from
,
rt_ubase_t
to
)
;
*
r0
-->
from
*
r1
-->
to
*/
.
globl
rt_hw_context_switch
rt_hw_context_switch
:
MOV
X8
,
X0
MOV
X9
,
X1
SAVE_CONTEXT_T
STR
X0
,
[
X8
]
//
store
sp
in
preempted
tasks
TCB
LDR
X0
,
[
X9
]
//
get
new
task
stack
pointer
RESTORE_CONTEXT
/*
*
void
rt_hw_context_switch_interrupt
(
rt_ubase_t
from
,
rt_ubase_t
to
)
;
*/
.
globl
rt_thread_switch_interrupt_flag
.
globl
rt_interrupt_from_thread
.
globl
rt_interrupt_to_thread
.
globl
rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt
:
ADR
X2
,
rt_thread_switch_interrupt_flag
LDR
X3
,
[
X2
]
CMP
X3
,
#
1
B.EQ
_reswitch
ADR
X4
,
rt_interrupt_from_thread
//
set
rt_interrupt_from_thread
MOV
X3
,
#
1
//
set
rt_thread_switch_interrupt_flag
to
1
STR
X0
,
[
X4
]
STR
X3
,
[
X2
]
_reswitch
:
ADR
X2
,
rt_interrupt_to_thread
//
set
rt_interrupt_to_thread
STR
X1
,
[
X2
]
RET
.
text
//
--
Exception
handlers
----------------------------------
.
align
8
.
globl
vector_fiq
vector_fiq
:
SAVE_CONTEXT
STP
X0
,
X1
,
[
SP
,
#-
0x10
]!
BL
rt_hw_trap_fiq
LDP
X0
,
X1
,
[
SP
],
#
0x10
RESTORE_CONTEXT
.
globl
rt_interrupt_enter
.
globl
rt_interrupt_leave
.
globl
rt_thread_switch_interrupt_flag
.
globl
rt_interrupt_from_thread
.
globl
rt_interrupt_to_thread
//
-------------------------------------------------------------------
.
align
8
.
globl
vector_irq
vector_irq
:
SAVE_CONTEXT
STP
X0
,
X1
,
[
SP
,
#-
0x10
]!
BL
rt_interrupt_enter
BL
rt_hw_trap_irq
BL
rt_interrupt_leave
LDP
X0
,
X1
,
[
SP
],
#
0x10
//
if
rt_thread_switch_interrupt_flag
set
,
jump
to
//
rt_hw_context_switch_interrupt_do
and
don
't return
ADR
X1
,
rt_thread_switch_interrupt_flag
LDR
X2
,
[
X1
]
CMP
X2
,
#
1
B.NE
vector_irq_exit
MOV
X2
,
#
0
//
clear
flag
STR
X2
,
[
X1
]
ADR
X3
,
rt_interrupt_from_thread
LDR
X4
,
[
X3
]
STR
x0
,
[
X4
]
//
store
sp
in
preempted
tasks
's TCB
ADR
x3
,
rt_interrupt_to_thread
LDR
X4
,
[
X3
]
LDR
x0
,
[
X4
]
//
get
new
task
's stack pointer
vector_irq_exit
:
RESTORE_CONTEXT
//
-------------------------------------------------
.
align
8
.
globl
vector_error
vector_error
:
SAVE_CONTEXT
BL
rt_hw_trap_error
B
.
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cp15.h
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-15 Bernard first version
*/
#ifndef __CP15_H__
#define __CP15_H__
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline
#endif
#define __WFI() __asm__ volatile ("wfi":::"memory")
#define __WFE() __asm__ volatile ("wfe":::"memory")
#define __SEV() __asm__ volatile ("sev")
/**
\brief Instruction Synchronization Barrier
\details Instruction Synchronization Barrier flushes the pipeline in the processor,
so that all instructions following the ISB are fetched from cache or
memory, after the instruction has been completed.
*/
__STATIC_FORCEINLINE
void
__ISB
(
void
)
{
__asm__
volatile
(
"isb 0xF"
:::
"memory"
);
}
/**
\brief Data Synchronization Barrier
\details Acts as a special kind of Data Memory Barrier.
It completes when all explicit memory accesses before this instruction complete.
*/
__STATIC_FORCEINLINE
void
__DSB
(
void
)
{
__asm__
volatile
(
"dsb 0xF"
:::
"memory"
);
}
/**
\brief Data Memory Barrier
\details Ensures the apparent order of the explicit memory operations before
and after the instruction, without ensuring their completion.
*/
__STATIC_FORCEINLINE
void
__DMB
(
void
)
{
__asm__
volatile
(
"dmb 0xF"
:::
"memory"
);
}
unsigned
long
rt_cpu_get_smp_id
(
void
);
void
rt_cpu_mmu_disable
(
void
);
void
rt_cpu_mmu_enable
(
void
);
void
rt_cpu_tlb_set
(
volatile
unsigned
long
*
);
void
rt_cpu_dcache_clean_flush
(
void
);
void
rt_cpu_icache_flush
(
void
);
void
rt_cpu_vector_set_base
(
rt_ubase_t
addr
);
void
rt_hw_mmu_init
(
void
);
void
rt_hw_vector_init
(
void
);
#endif
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cpu.c
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-15 Bernard first version
* 2019-07-28 zdzn add smp support
*/
#include <rthw.h>
#include <rtthread.h>
#include <board.h>
#include "cp15.h"
int
rt_hw_cpu_id
(
void
)
{
int
cpu_id
;
rt_base_t
value
;
__asm__
volatile
(
"mrs %0, mpidr_el1"
:
"=r"
(
value
)
);
cpu_id
=
value
&
0xf
;
return
cpu_id
;
};
#ifdef RT_USING_SMP
void
rt_hw_spin_lock_init
(
rt_hw_spinlock_t
*
lock
)
{
lock
->
slock
=
0
;
}
void
rt_hw_spin_lock
(
rt_hw_spinlock_t
*
lock
)
{
unsigned
long
tmp
;
unsigned
long
newval
;
rt_hw_spinlock_t
lockval
;
__asm__
__volatile__
(
" prfm pstl1strm, %3
\n
"
"1: ldaxr %w0, %3
\n
"
" add %w1, %w0, %w5
\n
"
" stxr %w2, %w1, %3
\n
"
" cbnz %w2, 1b
\n
"
/* Did we get the lock? */
" eor %w1, %w0, %w0, ror #16
\n
"
" cbz %w1, 3f
\n
"
" sevl
\n
"
"2: wfe
\n
"
" ldaxrh %w2, %4
\n
"
" eor %w1, %w2, %w0, lsr #16
\n
"
" cbnz %w1, 2b
\n
"
"3:"
:
"=&r"
(
lockval
),
"=&r"
(
newval
),
"=&r"
(
tmp
),
"+Q"
(
*
lock
)
:
"Q"
(
lock
->
slock
),
"I"
(
1
<<
16
)
:
"memory"
);
}
void
rt_hw_spin_unlock
(
rt_hw_spinlock_t
*
lock
)
{
__DMB
();
lock
->
tickets
.
owner
++
;
__DSB
();
__SEV
();
}
#endif
/*RT_USING_SMP*/
/**
* @addtogroup ARM CPU
*/
/*@{*/
/** shutdown CPU */
RT_WEAK
void
rt_hw_cpu_shutdown
()
{
rt_uint32_t
level
;
rt_kprintf
(
"shutdown...
\n
"
);
level
=
rt_hw_interrupt_disable
();
while
(
level
)
{
RT_ASSERT
(
0
);
}
}
#ifdef RT_USING_CPU_FFS
/**
* This function finds the first bit set (beginning with the least significant bit)
* in value and return the index of that bit.
*
* Bits are numbered starting at 1 (the least significant bit). A return value of
* zero from any of these functions means that the argument was zero.
*
* @return return the index of the first bit set. If value is 0, then this function
* shall return 0.
*/
int
__rt_ffs
(
int
value
)
{
return
__builtin_ffs
(
value
);
}
#endif
/*@}*/
bsp/nuvoton/libraries/ma35/libcpu/aarch64/cpu_gcc.S
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
*
Copyright
(
c
)
2006
-
2020
,
RT
-
Thread
Development
Team
*
*
SPDX
-
License
-
Identifier
:
Apache
-
2
.0
*
*
Date
Author
Notes
*
2018
-
10
-
06
ZhaoXiaowei
the
first
version
*/
.
text
.
globl
rt_hw_get_current_el
rt_hw_get_current_el
:
MRS
X0
,
CurrentEL
CMP
X0
,
0xc
B.EQ
3
f
CMP
X0
,
0x8
B.EQ
2
f
CMP
X0
,
0x4
B.EQ
1
f
LDR
X0
,
=
0
B
0
f
3
:
LDR
X0
,
=
3
B
0
f
2
:
LDR
X0
,
=
2
B
0
f
1
:
LDR
X0
,
=
1
B
0
f
0
:
RET
.
globl
rt_hw_set_current_vbar
rt_hw_set_current_vbar
:
MRS
X1
,
CurrentEL
CMP
X1
,
0xc
B.EQ
3
f
CMP
X1
,
0x8
B.EQ
2
f
CMP
X1
,
0x4
B.EQ
1
f
B
0
f
3
:
MSR
VBAR_EL3
,
X0
B
0
f
2
:
MSR
VBAR_EL2
,
X0
B
0
f
1
:
MSR
VBAR_EL1
,
X0
B
0
f
0
:
RET
.
globl
rt_hw_set_elx_env
rt_hw_set_elx_env
:
MRS
X1
,
CurrentEL
CMP
X1
,
0xc
B.EQ
3
f
CMP
X1
,
0x8
B.EQ
2
f
CMP
X1
,
0x4
B.EQ
1
f
B
0
f
3
:
MRS
X0
,
SCR_EL3
ORR
X0
,
X0
,
#
0xF
/*
SCR_EL3
.
NS|IRQ|FIQ
|
EA
*/
MSR
SCR_EL3
,
X0
B
0
f
2
:
MRS
X0
,
HCR_EL2
ORR
X0
,
X0
,
#
0x38
MSR
HCR_EL2
,
X0
B
0
f
1
:
B
0
f
0
:
RET
bsp/nuvoton/libraries/ma35/libcpu/aarch64/entry_point.S
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
*
Copyright
(
c
)
2006
-
2020
,
RT
-
Thread
Development
Team
*
*
SPDX
-
License
-
Identifier
:
Apache
-
2
.0
*
*
Date
Author
Notes
*
2021
-
06
-
29
Wayne
the
first
version
*/
/*
GICv2
-
Distributor
Registers
*/
#define GICD_CTLR 0x0000
#define GICD_TYPER 0x0004
#define GICD_IIDR 0x0008
#define GICD_STATUSR 0x0010
#define GICD_SETSPI_NSR 0x0040
#define GICD_CLRSPI_NSR 0x0048
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
#define GICD_SEIR 0x0068
#define GICD_IGROUPRn 0x0080
#define GICD_ISENABLERn 0x0100
#define GICD_ICENABLERn 0x0180
#define GICD_ISPENDRn 0x0200
#define GICD_ICPENDRn 0x0280
#define GICD_ISACTIVERn 0x0300
#define GICD_ICACTIVERn 0x0380
#define GICD_IPRIORITYRn 0x0400
#define GICD_ITARGETSRn 0x0800
#define GICD_ICFGR 0x0c00
#define GICD_IGROUPMODRn 0x0d00
#define GICD_NSACRn 0x0e00
#define GICD_SGIR 0x0f00
#define GICD_CPENDSGIRn 0x0f10
#define GICD_SPENDSGIRn 0x0f20
#define GICD_IROUTERn 0x6000
/*
GICv2
-
CPU
Interface
Memory
Mapped
Registers
*/
#define GICC_CTLR 0x0000
#define GICC_PMR 0x0004
#define GICC_BPR 0x0008
#define GICC_IAR 0x000C
#define GICC_EOIR 0x0010
#define GICC_RPR 0x0014
#define GICC_HPPIR 0x0018
#define GICC_ABPR 0x001c
#define GICC_AIAR 0x0020
#define GICC_AEOIR 0x0024
#define GICC_AHPPIR 0x0028
#define GICC_APRn 0x00d0
#define GICC_NSAPRn 0x00e0
#define GICC_IIDR 0x00fc
#define GICC_DIR 0x1000
.
section
".
text.entrypoint
"
.
global
_start
_start
:
/*=============================================================*/
/
*
Read
CPU
id
*/
/
*
Primary
core
(
id
=
0
)
:
Help
Secondary
core
leaving
.
*/
/
*
Secondary
core
(
id
>
0
)
:
Notice
'Ready'
to
Primary
core
.
*/
/*=============================================================*/
/
*
MPIDR_EL1
:
Multi
-
Processor
Affinity
Register
*/
mrs
x1
,
mpidr_el1
and
x1
,
x1
,
#
3
cbz
x1
,
.
L__cpu_0
.
L__current_cpu_idle
:
/*=============================================================*/
/
*
Secondary
CPUs
*/
/*=============================================================*/
wfe
b
.
L__current_cpu_idle
.
L__cpu_0
:
/*=============================================================*/
/
*
Initialize
Gtimer
.
Set
frequency
to
12
MHz
.
*/
/*=============================================================*/
mov
x0
,
#
0x1B00
movk
x0
,
#
0xB7
,
LSL
#
16
msr
CNTFRQ_EL0
,
x0
/*=============================================================*/
/
*
Enable
GICv2
.
*/
/
*
Assign
all
IRQs
to
secure
group
.
*/
/*=============================================================*/
/
*
Route
to
secure
Group
*/
mov
x0
,
#
0x1000
movk
x0
,
#
0x5080
,
LSL
#
16
mov
w9
,
#
0x3
str
w9
,
[
x0
,
GICD_CTLR
]
ldr
w9
,
[
x0
,
GICD_TYPER
]
and
w10
,
w9
,
#
0x1f
cbz
w10
,
1
f
add
x11
,
x0
,
GICD_IGROUPRn
mov
w9
,
#
0
str
w9
,
[
x11
],
#
0x04
0
:
str
w9
,
[
x11
],
#
0x04
sub
w10
,
w10
,
#
0x1
cbnz
w10
,
0
b
mov
x1
,
#
0x2000
movk
x1
,
#
0x5080
,
LSL
#
16
mov
w0
,
#
3
str
w0
,
[
x1
]
mov
w0
,
#
1
<<
7
str
w0
,
[
x1
,
#
4
]
1
:
mov
x0
,
#
0x1000
movk
x0
,
#
0x5080
,
LSL
#
16
mov
x1
,
#
0x2000
movk
x1
,
#
0x5080
,
LSL
#
16
mov
w9
,
#
0
str
w9
,
[
x0
,
GICD_IGROUPRn
]
mov
w9
,
#
0x1
str
w9
,
[
x0
,
GICD_ISENABLERn
]
mov
w9
,
#
0x1e7
str
w9
,
[
x1
,
GICC_CTLR
]
mov
w9
,
#
0x1
<<
7
str
w9
,
[
x1
,
GICC_PMR
]
/*=============================================================*/
/
*
Enable
the
SMP
bit
.
*/
/*=============================================================*/
mrs
x0
,
S3_1_C15_C2_1
orr
x0
,
x0
,
#(
1
<<
6
)
msr
S3_1_C15_C2_1
,
x0
/*=============================================================*/
/
*
Enable
FP
/
SIMD
at
EL1
*/
/*=============================================================*/
mov
x0
,
#(
3
<<
20
)
msr
cpacr_el1
,
x0
/*
Enable
FP
/
SIMD
at
EL1
*/
/*=============================================================*/
/
*
Initialize
sctlr_el1
*/
/*=============================================================*/
mov
x0
,
xzr
orr
x0
,
x0
,
#(
1
<<
29
)
/*
Enable
LSMAOE
at
EL1
*/
orr
x0
,
x0
,
#(
1
<<
28
)
/*
Enable
nTLSMD
at
EL1
*/
orr
x0
,
x0
,
#(
1
<<
23
)
/*
Enable
SPAN
at
EL1
*/
orr
x0
,
x0
,
#(
1
<<
22
)
/*
Enable
EIS
at
EL1
*/
orr
x0
,
x0
,
#(
1
<<
20
)
/*
Enable
TSCXT
at
EL1
*/
orr
x0
,
x0
,
#(
1
<<
11
)
/*
Enable
EOS
at
EL1
*/
msr
sctlr_el1
,
x0
/*=============================================================*/
/
*
Initialize
scr_el3
*/
/*=============================================================*/
mov
x0
,
xzr
orr
x0
,
x0
,
#(
1
<<
10
)
/*
Enable
AARCH64
*/
orr
x0
,
x0
,
#(
1
<<
9
)
/*
Enable
SIF
*/
orr
x0
,
x0
,
#(
1
<<
8
)
/*
Enable
HCE
*/
orr
x0
,
x0
,
#(
1
<<
7
)
/*
Enable
SMD
*/
orr
x0
,
x0
,
#(
1
<<
5
)
/*
RES1
[
5
:
4
]
*/
orr
x0
,
x0
,
#(
1
<<
4
)
/
*
Disable
FIQ
routing
*/
/
*
Disable
IRQ
routing
*/
/
*
Disable
NS
*/
msr
scr_el3
,
x0
/*=============================================================*/
/
*
Initialize
spsr_el3
*/
/*=============================================================*/
mov
x0
,
xzr
mov
x0
,
#
0
b00101
/*
AARCH64_EL1
*/
orr
x0
,
x0
,
#(
1
<<
8
)
/*
Enable
SError
and
External
Abort
.
*/
orr
x0
,
x0
,
#(
1
<<
7
)
/*
IRQ
interrupt
Process
state
mask
.
*/
orr
x0
,
x0
,
#(
1
<<
6
)
/*
FIQ
interrupt
Process
state
mask
.
*/
msr
spsr_el3
,
x0
/*=============================================================*/
/
*
Initialize
elr_el3
*/
/
*
Jump
to
Secure
EL1
from
EL3
.
*/
/*=============================================================*/
adr
x0
,
.
aarch64_code
/*
Exception
return
to
aarch64_code
*/
msr
elr_el3
,
x0
eret
.
aarch64_code
:
ldr
x1
,
=
_start
mov
sp
,
x1
/*=============================================================*/
/
*
clear
bbs
*/
/*=============================================================*/
ldr
x1
,
=
__bss_start
ldr
w2
,
=
__bss_size
.
L__clean_bss_loop
:
cbz
w2
,
.
L__jump_to_entry
str
xzr
,
[
x1
],
#
8
sub
w2
,
w2
,
#
1
cbnz
w2
,
.
L__clean_bss_loop
/*=============================================================*/
/
*
jump
to
C
code
*/
/*=============================================================*/
.
L__jump_to_entry
:
bl
entry
/*=============================================================*/
/
*
for
failsafe
,
halt
this
core
too
*/
/*=============================================================*/
b
.
L__current_cpu_idle
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gic.c
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
* 2014-04-03 Grissiom many enhancements
* 2018-11-22 Jesven add rt_hw_ipi_send()
* add rt_hw_ipi_handler_install()
*/
#include <rthw.h>
#include <rtthread.h>
#include "gic.h"
#include "cp15.h"
struct
arm_gic
{
rt_uint32_t
offset
;
/* the first interrupt index in the vector table */
rt_uint32_t
dist_hw_base
;
/* the base address of the gic distributor */
rt_uint32_t
cpu_hw_base
;
/* the base addrees of the gic cpu interface */
};
/* 'ARM_GIC_MAX_NR' is the number of cores */
static
struct
arm_gic
_gic_table
[
ARM_GIC_MAX_NR
];
/** Macro to access the Generic Interrupt Controller Interface (GICC)
*/
#define GIC_CPU_CTRL(hw_base) __REG32((hw_base) + 0x00U)
#define GIC_CPU_PRIMASK(hw_base) __REG32((hw_base) + 0x04U)
#define GIC_CPU_BINPOINT(hw_base) __REG32((hw_base) + 0x08U)
#define GIC_CPU_INTACK(hw_base) __REG32((hw_base) + 0x0cU)
#define GIC_CPU_EOI(hw_base) __REG32((hw_base) + 0x10U)
#define GIC_CPU_RUNNINGPRI(hw_base) __REG32((hw_base) + 0x14U)
#define GIC_CPU_HIGHPRI(hw_base) __REG32((hw_base) + 0x18U)
#define GIC_CPU_IIDR(hw_base) __REG32((hw_base) + 0xFCU)
/** Macro to access the Generic Interrupt Controller Distributor (GICD)
*/
#define GIC_DIST_CTRL(hw_base) __REG32((hw_base) + 0x000U)
#define GIC_DIST_TYPE(hw_base) __REG32((hw_base) + 0x004U)
#define GIC_DIST_IGROUP(hw_base, n) __REG32((hw_base) + 0x080U + ((n)/32U) * 4U)
#define GIC_DIST_ENABLE_SET(hw_base, n) __REG32((hw_base) + 0x100U + ((n)/32U) * 4U)
#define GIC_DIST_ENABLE_CLEAR(hw_base, n) __REG32((hw_base) + 0x180U + ((n)/32U) * 4U)
#define GIC_DIST_PENDING_SET(hw_base, n) __REG32((hw_base) + 0x200U + ((n)/32U) * 4U)
#define GIC_DIST_PENDING_CLEAR(hw_base, n) __REG32((hw_base) + 0x280U + ((n)/32U) * 4U)
#define GIC_DIST_ACTIVE_SET(hw_base, n) __REG32((hw_base) + 0x300U + ((n)/32U) * 4U)
#define GIC_DIST_ACTIVE_CLEAR(hw_base, n) __REG32((hw_base) + 0x380U + ((n)/32U) * 4U)
#define GIC_DIST_PRI(hw_base, n) __REG32((hw_base) + 0x400U + ((n)/4U) * 4U)
#define GIC_DIST_TARGET(hw_base, n) __REG32((hw_base) + 0x800U + ((n)/4U) * 4U)
#define GIC_DIST_CONFIG(hw_base, n) __REG32((hw_base) + 0xc00U + ((n)/16U) * 4U)
#define GIC_DIST_SOFTINT(hw_base) __REG32((hw_base) + 0xf00U)
#define GIC_DIST_CPENDSGI(hw_base, n) __REG32((hw_base) + 0xf10U + ((n)/4U) * 4U)
#define GIC_DIST_SPENDSGI(hw_base, n) __REG32((hw_base) + 0xf20U + ((n)/4U) * 4U)
#define GIC_DIST_ICPIDR2(hw_base) __REG32((hw_base) + 0xfe8U)
static
unsigned
int
_gic_max_irq
;
/* Acknowledge the highest-priority pending interrupt by reading GICC_IAR
 * and translate it to an absolute vector number.
 * NOTE(review): GICC_IAR also carries the source CPU ID in bits [12:10]
 * for SGIs; the value is returned unmasked — confirm that callers only
 * rely on this in a single-core configuration. */
int arm_gic_get_active_irq(rt_uint32_t index)
{
    int vector;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    vector = GIC_CPU_INTACK(_gic_table[index].cpu_hw_base);
    vector += _gic_table[index].offset;

    return vector;
}
/* Finish handling an interrupt: clear its pending bit in the distributor
 * and write end-of-interrupt to the CPU interface (GICC_EOIR).
 * @param index GIC instance index (< ARM_GIC_MAX_NR)
 * @param irq   absolute vector number (instance offset still applied)
 */
void arm_gic_ack(rt_uint32_t index, int irq)
{
    rt_uint32_t mask;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* compare against signed 0: with '0U' the signed 'irq' is promoted to
     * unsigned and the assertion could never fail */
    RT_ASSERT(irq >= 0);

    /* compute the bit mask from the hardware INTID (after offset removal) */
    mask = 1U << (irq % 32U);

    GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
    GIC_CPU_EOI(_gic_table[index].cpu_hw_base) = irq;
}
/* Disable (mask) one interrupt via GICD_ICENABLERn.
 * @param index GIC instance index
 * @param irq   absolute vector number
 */
void arm_gic_mask(rt_uint32_t index, int irq)
{
    rt_uint32_t mask;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    mask = 1U << (irq % 32U);
    GIC_DIST_ENABLE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}
/* Enable (unmask) one interrupt via GICD_ISENABLERn.
 * @param index GIC instance index
 * @param irq   absolute vector number
 */
void arm_gic_umask(rt_uint32_t index, int irq)
{
    rt_uint32_t mask;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    mask = 1U << (irq % 32U);
    GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, irq) = mask;
}
/* Query whether an interrupt is pending.
 * @return 1 if pending, 0 otherwise.
 */
rt_uint32_t arm_gic_get_pending_irq(rt_uint32_t index, int irq)
{
    rt_uint32_t pend;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    if (irq >= 16)
    {
        pend = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;
    }
    else
    {
        /* INTID 0-15 Software Generated Interrupt: GICD_SPENDSGIR holds one
         * byte per SGI (a per-source-CPU bitmap) */
        pend = (GIC_DIST_SPENDSGI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL;
        /* No CPU identification offered: collapse the bitmap to a boolean */
        pend = (pend != 0U) ? 1U : 0U;
    }

    return pend;
}
/* Force an interrupt into the pending state. SPIs/PPIs use GICD_ISPENDRn;
 * SGIs (INTID < 16) must be raised through GICD_SGIR instead.
 */
void arm_gic_set_pending_irq(rt_uint32_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    if (irq >= 16)
    {
        GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) = 1U << (irq % 32U);
    }
    else
    {
        /* INTID 0-15 Software Generated Interrupt */
        /* Forward the interrupt to the CPU interface that requested it
         * (TargetListFilter = 0b10) */
        GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) = (irq | 0x02000000U);
    }
}
/* Clear the pending state of an interrupt. SGIs use GICD_CPENDSGIR
 * (one byte per SGI), everything else GICD_ICPENDRn.
 */
void arm_gic_clear_pending_irq(rt_uint32_t index, int irq)
{
    rt_uint32_t mask;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    if (irq >= 16)
    {
        mask = 1U << (irq % 32U);
        GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
    }
    else
    {
        mask = 1U << ((irq % 4U) * 8U);
        GIC_DIST_CPENDSGI(_gic_table[index].dist_hw_base, irq) = mask;
    }
}
/* Program the trigger configuration of an interrupt (GICD_ICFGRn,
 * 2 bits per INTID).
 * @param config 2-bit field; bit1: 0 = level-sensitive, 1 = edge-triggered
 */
void arm_gic_set_configuration(rt_uint32_t index, int irq, uint32_t config)
{
    rt_uint32_t icfgr;
    rt_uint32_t shift;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    icfgr = GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq);
    shift = (irq % 16U) << 1U;   /* 2 bits per interrupt */

    icfgr &= (~(3U << shift));
    icfgr |= (config << shift);

    GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) = icfgr;
}
/* Read the 2-bit trigger configuration of an interrupt from GICD_ICFGRn.
 * Bug fix: the field offset is (irq % 16) * 2 bits; the original shifted by
 * (irq % 16) / 2 ('>> 1U' instead of '<< 1U') and returned the wrong field
 * with the upper bits unmasked.
 */
rt_uint32_t arm_gic_get_configuration(rt_uint32_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    return (GIC_DIST_CONFIG(_gic_table[index].dist_hw_base, irq) >> ((irq % 16U) << 1U)) & 0x3UL;
}
/* Clear the active state of an interrupt via GICD_ICACTIVERn. */
void arm_gic_clear_active(rt_uint32_t index, int irq)
{
    rt_uint32_t mask;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    mask = 1U << (irq % 32U);
    GIC_DIST_ACTIVE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}
/* Set up the cpu mask for the specific interrupt (GICD_ITARGETSRn,
 * one byte per INTID; each bit selects one CPU interface). */
void arm_gic_set_cpu(rt_uint32_t index, int irq, unsigned int cpumask)
{
    rt_uint32_t old_tgt;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    old_tgt = GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq);

    old_tgt &= ~(0x0FFUL << ((irq % 4U) * 8U));
    old_tgt |= cpumask << ((irq % 4U) * 8U);

    GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) = old_tgt;
}
/* Read the CPU-target byte of an interrupt from GICD_ITARGETSRn. */
rt_uint32_t arm_gic_get_target_cpu(rt_uint32_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    return (GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL;
}
/* Program the 8-bit priority of an interrupt (GICD_IPRIORITYRn,
 * one byte per INTID; lower value = higher priority). */
void arm_gic_set_priority(rt_uint32_t index, int irq, rt_uint32_t priority)
{
    rt_uint32_t mask;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    mask = GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq);
    mask &= ~(0xFFUL << ((irq % 4U) * 8U));
    mask |= ((priority & 0xFFUL) << ((irq % 4U) * 8U));
    GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) = mask;
}
/* Read the 8-bit priority of an interrupt from GICD_IPRIORITYRn. */
rt_uint32_t arm_gic_get_priority(rt_uint32_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    return (GIC_DIST_PRI(_gic_table[index].dist_hw_base, irq) >> ((irq % 4U) * 8U)) & 0xFFUL;
}
/* Write the CPU-interface priority mask (GICC_PMR): interrupts with a
 * priority value >= the mask are not signalled to the core. */
void arm_gic_set_interface_prior_mask(rt_uint32_t index, rt_uint32_t priority)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    /* set priority mask (only the low 8 bits are defined) */
    GIC_CPU_PRIMASK(_gic_table[index].cpu_hw_base) = priority & 0xFFUL;
}
/* Read back the CPU-interface priority mask (GICC_PMR). */
rt_uint32_t arm_gic_get_interface_prior_mask(rt_uint32_t index)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    return GIC_CPU_PRIMASK(_gic_table[index].cpu_hw_base);
}
/* Write the binary-point register (GICC_BPR, 3 bits): splits priority into
 * group/sub-priority for preemption purposes.
 * Fix: validate 'index' like every other accessor in this file does.
 */
void arm_gic_set_binary_point(rt_uint32_t index, rt_uint32_t binary_point)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    GIC_CPU_BINPOINT(_gic_table[index].cpu_hw_base) = binary_point & 0x7U;
}
/* Read the binary-point register (GICC_BPR).
 * Fix: validate 'index' like every other accessor in this file does.
 */
rt_uint32_t arm_gic_get_binary_point(rt_uint32_t index)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    return GIC_CPU_BINPOINT(_gic_table[index].cpu_hw_base);
}
/* Combined status of an interrupt.
 * @return bit1 = active, bit0 = pending.
 */
rt_uint32_t arm_gic_get_irq_status(rt_uint32_t index, int irq)
{
    rt_uint32_t pending;
    rt_uint32_t active;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    active = (GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;
    pending = (GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;

    return ((active << 1U) | pending);
}
/* Raise a Software Generated Interrupt through GICD_SGIR.
 * @param target_list bitmap of destination CPU interfaces (bits [23:16])
 * @param filter_list TargetListFilter field (bits [25:24])
 */
void arm_gic_send_sgi(rt_uint32_t index, int irq, rt_uint32_t target_list, rt_uint32_t filter_list)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) =
        ((filter_list & 0x3U) << 24U) | ((target_list & 0xFFUL) << 16U) | (irq & 0x0FUL);
}
/* Read GICC_HPPIR: the highest-priority pending interrupt, without
 * acknowledging it. */
rt_uint32_t arm_gic_get_high_pending_irq(rt_uint32_t index)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    return GIC_CPU_HIGHPRI(_gic_table[index].cpu_hw_base);
}
/* Read GICC_IIDR: the CPU-interface implementer identification register. */
rt_uint32_t arm_gic_get_interface_id(rt_uint32_t index)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    return GIC_CPU_IIDR(_gic_table[index].cpu_hw_base);
}
/* Assign an interrupt to security group 0 or 1 (GICD_IGROUPRn,
 * 1 bit per INTID).
 */
void arm_gic_set_group(rt_uint32_t index, int irq, rt_uint32_t group)
{
    uint32_t igroupr;
    uint32_t shift;

    RT_ASSERT(index < ARM_GIC_MAX_NR);
    RT_ASSERT(group <= 1U);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    igroupr = GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq);
    shift = (irq % 32U);
    igroupr &= (~(1U << shift));
    igroupr |= ((group & 0x1U) << shift);

    GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) = igroupr;
}
/* Read the security group bit of an interrupt from GICD_IGROUPRn. */
rt_uint32_t arm_gic_get_group(rt_uint32_t index, int irq)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    /* signed comparison; 'irq >= 0U' would always be true */
    RT_ASSERT(irq >= 0);

    return (GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, irq) >> (irq % 32U)) & 0x1UL;
}
/* Initialise the GIC distributor: probe the number of supported INTIDs,
 * set sane defaults (level-triggered, CPU0 target, mid priority, all
 * disabled, group 0), then enable group-0 forwarding.
 * @param index     GIC instance index
 * @param dist_base distributor base address
 *                  (NOTE(review): rt_uint32_t truncates 64-bit addresses
 *                  on aarch64 — presumably the peripheral lives below 4 GiB;
 *                  confirm for this SoC)
 * @param irq_start vector-number offset applied to all INTIDs
 * @return always 0
 */
int arm_gic_dist_init(rt_uint32_t index, rt_uint32_t dist_base, int irq_start)
{
    unsigned int gic_type, i;
    rt_uint32_t cpumask = 1U << 0U;   /* target CPU interface 0 */

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    _gic_table[index].dist_hw_base = dist_base;
    _gic_table[index].offset = irq_start;

    /* Find out how many interrupts are supported. */
    gic_type = GIC_DIST_TYPE(dist_base);
    _gic_max_irq = ((gic_type & 0x1fU) + 1U) * 32U;

    /*
     * The GIC only supports up to 1020 interrupt sources.
     * Limit this to either the architected maximum, or the
     * platform maximum.
     */
    if (_gic_max_irq > 1020U)
        _gic_max_irq = 1020U;
    if (_gic_max_irq > ARM_GIC_NR_IRQS) /* the platform maximum interrupts */
        _gic_max_irq = ARM_GIC_NR_IRQS;

    /* replicate the CPU0 mask into all four target bytes of the register */
    cpumask |= cpumask << 8U;
    cpumask |= cpumask << 16U;
    cpumask |= cpumask << 24U;

    /* disable the distributor while reconfiguring */
    GIC_DIST_CTRL(dist_base) = 0x0U;

    /* Set all global interrupts to be level triggered, active low. */
    for (i = 32U; i < _gic_max_irq; i += 16U)
        GIC_DIST_CONFIG(dist_base, i) = 0x0U;

    /* Set all global interrupts to this CPU only. */
    for (i = 32U; i < _gic_max_irq; i += 4U)
        GIC_DIST_TARGET(dist_base, i) = cpumask;

    /* Set priority on all interrupts. */
    for (i = 0U; i < _gic_max_irq; i += 4U)
        GIC_DIST_PRI(dist_base, i) = 0xa0a0a0a0U;

    /* Disable all interrupts. */
    for (i = 0U; i < _gic_max_irq; i += 32U)
        GIC_DIST_ENABLE_CLEAR(dist_base, i) = 0xffffffffU;

    /* Important: Below setting need be done in Secure world. */
    for (i = 0U; i < _gic_max_irq; i += 32U)
        GIC_DIST_IGROUP(dist_base, i) = 0U;

    /* Enable group0 interrupt forwarding. */
    GIC_DIST_CTRL(dist_base) = 0x01U;

    return 0;
}
/* Initialise one CPU interface: record its base address, open the priority
 * mask, disable sub-priority preemption, and enable signalling.
 * @return always 0
 */
int arm_gic_cpu_init(rt_uint32_t index, rt_uint32_t cpu_base)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    _gic_table[index].cpu_hw_base = cpu_base;

    /* allow all priorities below 0xf0 through */
    GIC_CPU_PRIMASK(cpu_base) = 0xf0U;
    /* binary point 7: no preemption grouping */
    GIC_CPU_BINPOINT(cpu_base) = 0x7U;
    /* Enable CPU interrupt */
    GIC_CPU_CTRL(cpu_base) = 0x01U;

    return 0;
}
/* Print GIC version, base address, INTID count and security-extension
 * support, decoded from GICD_ICPIDR2 and GICD_TYPER.
 * NOTE(review): dist_hw_base is rt_uint32_t but printed with %p —
 * presumably harmless here; confirm against rt_kprintf's %p handling. */
void arm_gic_dump_type(rt_uint32_t index)
{
    unsigned int gic_type;

    gic_type = GIC_DIST_TYPE(_gic_table[index].dist_hw_base);
    rt_kprintf("GICv%d on %p, max IRQs: %d, %s security extension(%08x)\n",
               (GIC_DIST_ICPIDR2(_gic_table[index].dist_hw_base) >> 4U) & 0xfUL, /* ArchRev field */
               _gic_table[index].dist_hw_base,
               _gic_max_irq,
               gic_type & (1U << 10U) ? "has" : "no",
               gic_type);
}
/* Dump the raw enable/pending/active register banks of one GIC instance,
 * plus the highest-priority pending interrupt, via rt_kprintf. */
void arm_gic_dump(rt_uint32_t index)
{
    unsigned int i, k;

    k = GIC_CPU_HIGHPRI(_gic_table[index].cpu_hw_base);
    rt_kprintf("--- high pending priority: %d(%08x)\n", k, k);
    rt_kprintf("--- hw mask ---\n");
    for (i = 0U; i < _gic_max_irq / 32U; i++)
    {
        rt_kprintf("0x%08x, ",
                   GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, i * 32U));
    }
    rt_kprintf("\n--- hw pending ---\n");
    for (i = 0U; i < _gic_max_irq / 32U; i++)
    {
        rt_kprintf("0x%08x, ",
                   GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, i * 32U));
    }
    rt_kprintf("\n--- hw active ---\n");
    for (i = 0U; i < _gic_max_irq / 32U; i++)
    {
        rt_kprintf("0x%08x, ",
                   GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, i * 32U));
    }
    rt_kprintf("\n");
}
/* MSH shell entry: dump type information and register state of GIC 0. */
long gic_dump(void)
{
    arm_gic_dump_type(0);
    arm_gic_dump(0);

    return 0;
}
/* Register 'gic_dump' as an MSH shell command (description is a bare token
 * list by MSH_CMD_EXPORT convention). */
MSH_CMD_EXPORT(gic_dump, show gic status);
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gic.h
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-20     Bernard      first version
 */

/* ARM GICv2 driver interface: 'index' selects the GIC instance, 'irq' is an
 * absolute vector number (instance offset included). */

#ifndef __GIC_H__
#define __GIC_H__

#include <rthw.h>
#include <board.h>

/* Acknowledge and return the highest-priority pending interrupt (GICC_IAR). */
int arm_gic_get_active_irq(rt_uint32_t index);
/* Clear pending state and signal end-of-interrupt (GICC_EOIR). */
void arm_gic_ack(rt_uint32_t index, int irq);

/* Disable / enable one interrupt. */
void arm_gic_mask(rt_uint32_t index, int irq);
void arm_gic_umask(rt_uint32_t index, int irq);

/* Query / set / clear pending state. */
rt_uint32_t arm_gic_get_pending_irq(rt_uint32_t index, int irq);
void arm_gic_set_pending_irq(rt_uint32_t index, int irq);
void arm_gic_clear_pending_irq(rt_uint32_t index, int irq);

/* Trigger configuration (GICD_ICFGR, 2 bits per interrupt). */
void arm_gic_set_configuration(rt_uint32_t index, int irq, uint32_t config);
rt_uint32_t arm_gic_get_configuration(rt_uint32_t index, int irq);

/* Clear the active state of an interrupt. */
void arm_gic_clear_active(rt_uint32_t index, int irq);

/* CPU targeting (GICD_ITARGETSR, one bit per CPU interface). */
void arm_gic_set_cpu(rt_uint32_t index, int irq, unsigned int cpumask);
rt_uint32_t arm_gic_get_target_cpu(rt_uint32_t index, int irq);

/* Per-interrupt priority (8 bits; lower value = higher priority). */
void arm_gic_set_priority(rt_uint32_t index, int irq, rt_uint32_t priority);
rt_uint32_t arm_gic_get_priority(rt_uint32_t index, int irq);

/* CPU-interface priority mask (GICC_PMR). */
void arm_gic_set_interface_prior_mask(rt_uint32_t index, rt_uint32_t priority);
rt_uint32_t arm_gic_get_interface_prior_mask(rt_uint32_t index);

/* Binary point register (GICC_BPR). */
void arm_gic_set_binary_point(rt_uint32_t index, rt_uint32_t binary_point);
rt_uint32_t arm_gic_get_binary_point(rt_uint32_t index);

/* Combined status: bit1 = active, bit0 = pending. */
rt_uint32_t arm_gic_get_irq_status(rt_uint32_t index, int irq);

/* Raise a Software Generated Interrupt (GICD_SGIR). */
void arm_gic_send_sgi(rt_uint32_t index, int irq, rt_uint32_t target_list, rt_uint32_t filter_list);

/* Highest-priority pending interrupt (GICC_HPPIR), without acknowledging. */
rt_uint32_t arm_gic_get_high_pending_irq(rt_uint32_t index);
/* CPU-interface identification register (GICC_IIDR). */
rt_uint32_t arm_gic_get_interface_id(rt_uint32_t index);

/* Security group assignment (GICD_IGROUPR). */
void arm_gic_set_group(rt_uint32_t index, int irq, rt_uint32_t group);
rt_uint32_t arm_gic_get_group(rt_uint32_t index, int irq);

/* One-time initialisation of distributor / CPU interface. */
int arm_gic_dist_init(rt_uint32_t index, rt_uint32_t dist_base, int irq_start);
int arm_gic_cpu_init(rt_uint32_t index, rt_uint32_t cpu_base);

/* Debug dumps via rt_kprintf. */
void arm_gic_dump_type(rt_uint32_t index);
void arm_gic_dump(rt_uint32_t index);

#endif
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gtimer.S
已删除
100644 → 0
浏览文件 @
d0b70ab7
#include "rtconfig.h"

/* enable gtimer: write CNTP_CTL_EL0 (EL1 physical timer control). */
.globl gtimer_set_control
gtimer_set_control:
    MSR CNTP_CTL_EL0, X0
    RET

/* set gtimer CNTP_TVAL_EL0 value (countdown until the timer fires). */
.globl gtimer_set_load_value
gtimer_set_load_value:
    MSR CNTP_TVAL_EL0, X0
    RET

/* get gtimer CNTP_TVAL_EL0 value (remaining countdown). */
.globl rt_hw_get_gtimer_val
rt_hw_get_gtimer_val:
    MRS X0, CNTP_TVAL_EL0
    RET

/* Same read as rt_hw_get_gtimer_val; kept as a second entry point to match
 * the gtimer.h API. */
.globl gtimer_get_current_value
gtimer_get_current_value:
    MRS X0, CNTP_TVAL_EL0
    RET

/* Read CNTPCT_EL0: the free-running physical counter. */
.globl rt_hw_get_cntpct_val
rt_hw_get_cntpct_val:
    MRS X0, CNTPCT_EL0
    RET

/* get gtimer frq value: read CNTFRQ_EL0 (counter frequency). */
.globl gtimer_get_counter_frequency
gtimer_get_counter_frequency:
    MRS X0, CNTFRQ_EL0
    RET
bsp/nuvoton/libraries/ma35/libcpu/aarch64/gtimer.h
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-03-30     huijie.feng  first version
 */

/* ARMv8 generic (physical EL1) timer interface. Implementations live in
 * gtimer.S; NOTE(review): not every prototype below has a visible
 * implementation in this chunk — verify against the .S file. */

#ifndef __GTIMER_H__
#define __GTIMER_H__

#include <rtdef.h>

/* Counter frequency (CNTFRQ_EL0). */
void gtimer_set_counter_frequency(rt_uint32_t value);
rt_uint32_t gtimer_get_counter_frequency(void);

/* Countdown value (CNTP_TVAL_EL0). */
void gtimer_set_load_value(rt_uint32_t value);
rt_uint32_t gtimer_get_current_value(void);

/* Physical counter / compare value. */
rt_uint64_t gtimer_get_current_physical_value(void);
void gtimer_set_physical_compare_value(rt_uint64_t value);
rt_uint64_t gtimer_get_physical_compare_value(void);

/* Timer control register (CNTP_CTL_EL0). */
void gtimer_set_control(rt_uint32_t value);
rt_uint32_t gtimer_get_control(void);

#endif
bsp/nuvoton/libraries/ma35/libcpu/aarch64/interrupt.c
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-04-16     bigmagic     first version
 */

#include <rthw.h>
#include <rtthread.h>
#include <gic.h>
#include <board.h>
#include <armv8.h>

/* Size of the ISR dispatch table below. */
#define MAX_HANDLERS                256
/* GICC_IAR: the interrupt ID occupies bits [9:0]. */
#define GIC_ACK_INTID_MASK          0x000003ff

#ifdef RT_USING_SMP
#define rt_interrupt_nest rt_cpu_self()->irq_nest
#else
extern volatile rt_uint8_t rt_interrupt_nest;
#endif

/* exception and interrupt handler table */
struct rt_irq_desc isr_table[MAX_HANDLERS];

#ifndef RT_USING_SMP
/* These variables will be accessed in ISR, so we need to share them. */
rt_ubase_t rt_interrupt_from_thread;
rt_ubase_t rt_interrupt_to_thread;
rt_ubase_t rt_thread_switch_interrupt_flag;
#endif

/* exception vector table, defined in cpu_gcc.S */
extern int system_vectors;
/* Install the exception vector table: point VBAR at 'system_vectors'
 * (defined in cpu_gcc.S). */
void rt_hw_vector_init(void)
{
    rt_hw_set_current_vbar((rt_ubase_t)&system_vectors);
}
/**
 * This function will initialize hardware interrupt: install the vector
 * table, clear the ISR dispatch table, then bring up GIC instance 0
 * (distributor first, then the CPU interface).
 */
void rt_hw_interrupt_init(void)
{
    rt_uint32_t gic_cpu_base;
    rt_uint32_t gic_dist_base;
    rt_uint32_t gic_irq_start;

    /* initialize vector table */
    rt_hw_vector_init();

    /* initialize exceptions table */
    rt_memset(isr_table, 0x00, sizeof(isr_table));

    /* initialize ARM GIC (base addresses come from board.h) */
    gic_dist_base = GIC_DISTRIBUTOR_BASE;
    gic_cpu_base = GIC_INTERFACE_BASE;
    gic_irq_start = GIC_IRQ_START;

    arm_gic_dist_init(0, gic_dist_base, gic_irq_start);
    arm_gic_cpu_init(0, gic_cpu_base);
}
/**
 * This function will mask a interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_mask(int vector)
{
    arm_gic_mask(0, vector);
}
/**
 * This function will un-mask a interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_umask(int vector)
{
    arm_gic_umask(0, vector);
}
/**
 * This function returns the active interrupt number.
 * @param none
 */
int rt_hw_interrupt_get_irq(void)
{
    return arm_gic_get_active_irq(0);
}
/**
 * This function acknowledges the interrupt.
 * @param vector the interrupt number
 */
void rt_hw_interrupt_ack(int vector)
{
    arm_gic_ack(0, vector);
}
/**
 * This function set interrupt CPU targets.
 * @param vector:   the interrupt number
 *        cpu_mask: target cpus mask, one bit for one core
 */
void rt_hw_interrupt_set_target_cpus(int vector, unsigned int cpu_mask)
{
    arm_gic_set_cpu(0, vector, cpu_mask);
}
/**
 * This function get interrupt CPU targets.
 * @param vector: the interrupt number
 * @return target cpus mask, one bit for one core
 */
unsigned int rt_hw_interrupt_get_target_cpus(int vector)
{
    return arm_gic_get_target_cpu(0, vector);
}
/**
 * This function set interrupt trigger mode.
 * @param vector: the interrupt number
 *        mode:   interrupt trigger mode; 0: level trigger, 1: edge trigger
 */
void rt_hw_interrupt_set_triger_mode(int vector, unsigned int mode)
{
    arm_gic_set_configuration(0, vector, mode);
}
/**
 * This function get interrupt trigger mode.
 * @param vector: the interrupt number
 * @return interrupt trigger mode; 0: level trigger, 1: edge trigger
 */
unsigned int rt_hw_interrupt_get_triger_mode(int vector)
{
    return arm_gic_get_configuration(0, vector);
}
/**
 * This function set interrupt pending flag.
 * @param vector: the interrupt number
 */
void rt_hw_interrupt_set_pending(int vector)
{
    arm_gic_set_pending_irq(0, vector);
}
/**
 * This function get interrupt pending flag.
 * @param vector: the interrupt number
 * @return interrupt pending flag, 0: not pending; 1: pending
 */
unsigned int rt_hw_interrupt_get_pending(int vector)
{
    return arm_gic_get_pending_irq(0, vector);
}
/**
 * This function clear interrupt pending flag.
 * @param vector: the interrupt number
 */
void rt_hw_interrupt_clear_pending(int vector)
{
    arm_gic_clear_pending_irq(0, vector);
}
/**
 * This function set interrupt priority value.
 * @param vector:   the interrupt number
 *        priority: the priority of interrupt to set
 */
void rt_hw_interrupt_set_priority(int vector, unsigned int priority)
{
    arm_gic_set_priority(0, vector, priority);
}
/**
 * This function get interrupt priority.
 * @param vector: the interrupt number
 * @return interrupt priority value
 */
unsigned int rt_hw_interrupt_get_priority(int vector)
{
    return arm_gic_get_priority(0, vector);
}
/**
 * This function set priority masking threshold.
 * @param priority: priority masking threshold
 */
void rt_hw_interrupt_set_priority_mask(unsigned int priority)
{
    arm_gic_set_interface_prior_mask(0, priority);
}
/**
 * This function get priority masking threshold.
 * @param none
 * @return priority masking threshold
 */
unsigned int rt_hw_interrupt_get_priority_mask(void)
{
    return arm_gic_get_interface_prior_mask(0);
}
/**
 * This function set priority grouping field split point.
 * @param bits: priority grouping field split point
 * @return 0: success; -1: failed
 */
int rt_hw_interrupt_set_prior_group_bits(unsigned int bits)
{
    /* the binary point register holds (7 - group bits); only 0..7 is valid */
    if (bits >= 8)
    {
        return -1;
    }

    arm_gic_set_binary_point(0, (7 - bits));
    return 0;
}
/**
 * This function get priority grouping field split point.
 * @param none
 * @return priority grouping field split point
 */
unsigned int rt_hw_interrupt_get_prior_group_bits(void)
{
    unsigned int binary_point = arm_gic_get_binary_point(0) & 0x07;

    return (7 - binary_point);
}
/**
 * This function will install a interrupt service routine to a interrupt.
 * @param vector  the interrupt number
 * @param handler the interrupt service routine to be installed
 * @param param   user parameter passed to the handler
 * @param name    optional handler name (used when RT_USING_INTERRUPT_INFO)
 * @return the previously installed handler, or RT_NULL
 *
 * Fixes: reject negative vectors (previously an out-of-bounds read/write
 * on isr_table), and skip the name copy when 'name' is RT_NULL.
 */
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
                                         void *param, const char *name)
{
    rt_isr_handler_t old_handler = RT_NULL;

    if (vector >= 0 && vector < MAX_HANDLERS)
    {
        old_handler = isr_table[vector].handler;

        if (handler != RT_NULL)
        {
#ifdef RT_USING_INTERRUPT_INFO
            if (name != RT_NULL)
            {
                rt_strncpy(isr_table[vector].name, name, RT_NAME_MAX);
            }
#endif /* RT_USING_INTERRUPT_INFO */
            isr_table[vector].handler = handler;
            isr_table[vector].param = param;
        }
    }

    return old_handler;
}
#ifdef RT_USING_SMP
void
rt_hw_ipi_send
(
int
ipi_vector
,
unsigned
int
cpu_mask
)
{
arm_gic_send_sgi
(
0
,
ipi_vector
,
cpu_mask
,
0
);
}
void
rt_hw_ipi_handler_install
(
int
ipi_vector
,
rt_isr_handler_t
ipi_isr_handler
)
{
/* note: ipi_vector maybe different with irq_vector */
rt_hw_interrupt_install
(
ipi_vector
,
ipi_isr_handler
,
0
,
"IPI_HANDLER"
);
}
#endif
bsp/nuvoton/libraries/ma35/libcpu/aarch64/interrupt.h
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-04-16     bigmagic     first version
 */

/* Hardware interrupt management API (implemented over the GIC driver). */

#ifndef __INTERRUPT_H__
#define __INTERRUPT_H__

#include <rthw.h>
#include <board.h>

#define INT_IRQ     0x00
#define INT_FIQ     0x01

/* One-time controller initialisation. */
void rt_hw_interrupt_init(void);

/* Mask / unmask one vector. */
void rt_hw_interrupt_mask(int vector);
void rt_hw_interrupt_umask(int vector);

/* Acknowledge cycle: fetch the active vector, then ack it when done. */
int rt_hw_interrupt_get_irq(void);
void rt_hw_interrupt_ack(int vector);

/* CPU targeting (one bit per core in the mask). */
void rt_hw_interrupt_set_target_cpus(int vector, unsigned int cpu_mask);
unsigned int rt_hw_interrupt_get_target_cpus(int vector);

/* Trigger mode: 0 = level, 1 = edge. */
void rt_hw_interrupt_set_triger_mode(int vector, unsigned int mode);
unsigned int rt_hw_interrupt_get_triger_mode(int vector);

/* Pending-flag management. */
void rt_hw_interrupt_set_pending(int vector);
unsigned int rt_hw_interrupt_get_pending(int vector);
void rt_hw_interrupt_clear_pending(int vector);

/* Per-vector priority. */
void rt_hw_interrupt_set_priority(int vector, unsigned int priority);
unsigned int rt_hw_interrupt_get_priority(int vector);

/* Priority masking threshold of the CPU interface. */
void rt_hw_interrupt_set_priority_mask(unsigned int priority);
unsigned int rt_hw_interrupt_get_priority_mask(void);

/* Priority grouping split point (returns 0 on success, -1 on failure). */
int rt_hw_interrupt_set_prior_group_bits(unsigned int bits);
unsigned int rt_hw_interrupt_get_prior_group_bits(void);

/* Install an ISR; returns the previous handler. */
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
        void *param, const char *name);

#ifdef RT_USING_SMP
/* Inter-processor interrupts. */
void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask);
void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler);
#endif

#endif
bsp/nuvoton/libraries/ma35/libcpu/aarch64/mmu.c
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-02-20     bigmagic     first version
 * 2021-06-25     Wayne        Support EL3
 */

#include <mmu.h>
#include <stddef.h>
#include <rthw.h>

#define TTBR_CNP    1

/* NOTE(review): shadows the <stdint.h> name; kept for source compatibility */
typedef unsigned long int uint64_t;

/* Static translation-table pool: 20 pages of 512 64-bit entries each;
 * page 0 is the root table, the rest are handed out on demand. */
static unsigned long main_tbl[512 * 20] __attribute__((aligned(4096)));

#define IS_ALIGNED(x, a)        (((x) & ((typeof(x))(a) - 1)) == 0)

/* Translation-table descriptor type bits. */
#define PMD_TYPE_SECT       (1 << 0)
#define PMD_TYPE_TABLE      (3 << 0)
#define PTE_TYPE_PAGE       (3 << 0)

#define BITS_PER_VA         39

/* Granule size of 4KB is being used */
#define GRANULE_SIZE_SHIFT  12
#define GRANULE_SIZE        (1 << GRANULE_SIZE_SHIFT)
#define XLAT_ADDR_MASK      ((1UL << BITS_PER_VA) - GRANULE_SIZE)
#define PMD_TYPE_MASK       (3 << 0)

/* Next free page index for create_table() (in units of 4 KiB pages).
 * NOTE(review): get_free_page() tracks the same pool with a separate
 * counter — verify the two allocators are never used together. */
int free_idx = 1;

/* Cache maintenance primitives implemented in assembly (cache.S). */
void __asm_invalidate_icache_all(void);
void __asm_flush_dcache_all(void);
int  __asm_flush_l3_cache(void);
void __asm_flush_dcache_range(unsigned long long start, unsigned long long end);
void __asm_invalidate_dcache_all(void);
void __asm_invalidate_icache_all(void);
/* Minimal byte-fill used before the MMU and caches are up (avoids libc). */
static void nonmmu_memset(char *dst, char v, size_t len)
{
    size_t i;

    for (i = 0; i < len; i++)
    {
        dst[i] = v;
    }
}
/* Next free page offset (in 64-bit entries) for get_free_page(). */
static unsigned long __page_off = 0;

/* Hand out the next 4 KiB page from the static main_tbl pool.
 * @return page address, or 0 when the pool is exhausted (the caller in
 *         map_single_page_2M() already treats 0 as allocation failure).
 * Fix: the original never bounds-checked and could return addresses past
 * the end of main_tbl (512 * 20 entries).
 */
static unsigned long get_free_page(void)
{
    if (__page_off + 512 >= 512 * 20)
    {
        return 0;
    }

    __page_off += 512;
    return (unsigned long)(main_tbl + __page_off);
}
/* Read the EL1 system control register (SCTLR_EL1). */
static inline unsigned int get_sctlr(void)
{
    unsigned int sctlr;

    asm volatile("mrs %0, sctlr_el1" : "=r"(sctlr) : : "cc");
    return sctlr;
}
/* Write SCTLR_EL1 and synchronise the context with an ISB. */
static inline void set_sctlr(unsigned int val)
{
    asm volatile("msr sctlr_el1, %0" : : "r"(val) : "cc");
    asm volatile("isb");
}
/* Configure the EL1 translation regime: memory attributes (MAIR_EL1),
 * translation control (TCR_EL1) and the table base (TTBR0_EL1), then zero
 * the root table. Does NOT enable the MMU — see mmu_enable(). */
void mmu_init(void)
{
    unsigned long val64;
    unsigned long val32;

    /* MAIR_EL1: attribute indirection table used by descriptor AttrIndx */
    val64 = 0x007f6eUL;
    __asm__ volatile("msr MAIR_EL1, %0\n dsb sy\n" ::"r"(val64));
    __asm__ volatile("mrs %0, MAIR_EL1\n dsb sy\n" : "=r"(val64)); /* read-back */

    //TCR_EL1
    val32 = (16UL << 0)      //48bit (T0SZ = 16)
            | (0x0UL << 6)
            | (0x0UL << 7)
            | (0x3UL << 8)
            | (0x3UL << 10)  //Inner Shareable
            | (0x2UL << 12)
            | (0x0UL << 14)  //4K (TG0 granule)
            | (0x0UL << 16)
            | (0x0UL << 22)
            | (0x1UL << 23)
            | (0x2UL << 30)
            | (0x1UL << 32)
            | (0x0UL << 35)
            | (0x0UL << 36)
            | (0x0UL << 37)
            | (0x0UL << 38);
    __asm__ volatile("msr TCR_EL1, %0\n" ::"r"(val32));
    __asm__ volatile("mrs %0, TCR_EL1\n" : "=r"(val32)); /* read-back */

    /* table walks start at main_tbl (level-0 table) */
    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\n" ::"r"(main_tbl));
    __asm__ volatile("mrs %0, TTBR0_EL1\n dsb sy\n" : "=r"(val64)); /* read-back */

    /* clear only the root table page; lower-level pages are zeroed when
     * they are allocated */
    nonmmu_memset((char *)main_tbl, 0, 4096);
}
/* Turn the MMU on: disable the I-cache, invalidate I-cache and TLBs, then
 * set SCTLR_EL1.{M,C,I} and re-enable both caches. Assumes mmu_init() has
 * already programmed MAIR/TCR/TTBR0. */
void mmu_enable(void)
{
    unsigned long val64;
    unsigned long val32;

    /* temporarily disable the I-cache while the tables become live */
    __asm__ volatile("mrs %0, SCTLR_EL1\n" : "=r"(val64));
    val64 &= ~0x1000;   //disable I
    __asm__ volatile("dmb sy\n msr SCTLR_EL1, %0\n isb sy\n" ::"r"(val64));

    /* invalidate all instruction caches to PoU (inner shareable) */
    __asm__ volatile("IC IALLUIS\n dsb sy\n isb sy\n");
    /* invalidate all EL1 TLB entries */
    __asm__ volatile("tlbi vmalle1\n dsb sy\n isb sy\n");

    //SCTLR_EL1, turn on mmu
    __asm__ volatile("mrs %0, SCTLR_EL1\n" : "=r"(val32));
    val32 |= 0x1005;    //enable mmu, I C M
    __asm__ volatile("dmb sy\n msr SCTLR_EL1, %0\n isb sy\n" ::"r"(val32));

    rt_hw_icache_enable();
    rt_hw_dcache_enable();
}
/* Install one 2 MiB block mapping va -> pa in the given level-0 table.
 * Walks levels 0 and 1, allocating intermediate table pages on demand, and
 * writes a block descriptor at level 2.
 * @param attr descriptor attribute bits (masked to the architected fields)
 * @return 0 on success, or a negative MMU_MAP_ERROR_* code
 */
static int map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
                              unsigned long pa, unsigned long attr)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = 39;   /* level-0 index starts at VA bit 39 */

    if (va & (0x200000UL - 1))
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & (0x200000UL - 1))
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }

    /* walk levels 0 and 1, creating table descriptors as needed */
    for (level = 0; level < 2; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;

        if ((cur_lv_tbl[off] & 1) == 0)
        {
            /* descriptor invalid: allocate and link a fresh table page */
            page = get_free_page();
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            nonmmu_memset((char *)page, 0, 4096);
            cur_lv_tbl[off] = page | 0x3UL; /* valid + table */
        }

        page = cur_lv_tbl[off];
        if (!(page & 0x2))
        {
            //is block! error!
            return MMU_MAP_ERROR_CONFLICT;
        }

        /* descend: extract the next-level table address from the descriptor */
        cur_lv_tbl = (unsigned long *)(page & 0x0000fffffffff000UL);
        level_shift -= 9;
    }

    /* keep only the architected attribute fields */
    attr &= 0xfff0000000000ffcUL;
    pa |= (attr | 0x1UL);   //block

    off = (va >> 21);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;

    return 0;
}
/* Map 'count' consecutive 2 MiB blocks starting at va -> pa.
 * @return 0 on success; -1 on misaligned input; otherwise the first
 *         error code reported by map_single_page_2M().
 */
int armv8_map_2M(unsigned long va, unsigned long pa, int count, unsigned long attr)
{
    int idx;
    int rc;

    if (va & (0x200000 - 1))
    {
        return -1;
    }
    if (pa & (0x200000 - 1))
    {
        return -1;
    }

    for (idx = 0; idx < count; idx++)
    {
        rc = map_single_page_2M((unsigned long *)main_tbl, va, pa, attr);
        if (rc != 0)
        {
            return rc;
        }
        va += 0x200000;
        pa += 0x200000;
    }

    return 0;
}
/* Write a table descriptor: link 'table_addr' into entry 'pt'
 * (bits [1:0] = 0b11: valid + table). */
static void set_table(uint64_t *pt, uint64_t *table_addr)
{
    *pt = (0x3UL | (uint64_t)table_addr);
}
/* Allocate the next 4 KiB table page from the static main_tbl pool and
 * zero it.
 * NOTE(review): no bounds check against the 20-page pool, and 'free_idx'
 * is independent of get_free_page()'s counter — the two allocators can
 * collide if armv8_map() and armv8_map_2M() are mixed; verify usage. */
static uint64_t *create_table(void)
{
    uint64_t *new_table = (uint64_t *)((unsigned char *)&main_tbl[0] + free_idx * 4096); //+ free_idx * GRANULE_SIZE;

    /* Mark all entries as invalid */
    nonmmu_memset((char *)new_table, 0, 4096);

    free_idx++;

    return new_table;
}
/* Descriptor type: bits [1:0] of a translation-table entry. */
static int pte_type(uint64_t *pte)
{
    return *pte & PMD_TYPE_MASK;
}
/* VA bit position translated at a given table level (4 KiB granule):
 * level 0 -> 39, 1 -> 30, 2 -> 21, 3 -> 12. */
static int level2shift(int level)
{
    /* Page is 12 bits wide, every level translates 9 bits */
    return 12 + 9 * (3 - level);
}
/* Return the next-level table referenced by 'pte'; if the entry is not a
 * table descriptor yet, allocate a fresh table and link it in. */
static uint64_t *get_level_table(uint64_t *pte)
{
    uint64_t *table = (uint64_t *)(*pte & XLAT_ADDR_MASK);

    if (pte_type(pte) != PMD_TYPE_TABLE)
    {
        /* entry was invalid or a block: replace it with a table descriptor */
        table = create_table();
        set_table(pte, table);
    }

    return table;
}
/* Map an arbitrary region virt -> phys of 'size' bytes by walking the
 * tables from the root and emitting the largest block/page that is both
 * aligned and no larger than the remaining size at each step.
 * @param attr descriptor attribute bits (masked to the architected fields)
 */
static void map_region(uint64_t virt, uint64_t phys, uint64_t size, uint64_t attr)
{
    uint64_t block_size = 0;
    uint64_t block_shift = 0;
    uint64_t *pte;
    uint64_t idx = 0;
    uint64_t addr = 0;
    uint64_t *table = 0;
    int level = 0;

    addr = virt;
    while (size)
    {
        /* restart the walk from the root for every chunk */
        table = &main_tbl[0];
        for (level = 0; level < 4; level++)
        {
            block_shift = level2shift(level);
            idx = addr >> block_shift;
            idx = idx % 512;
            block_size = (uint64_t)(1L << block_shift);
            pte = table + idx;

            if (size >= block_size && IS_ALIGNED(addr, block_size))
            {
                /* emit a mapping at this level and move on */
                attr &= 0xfff0000000000ffcUL;
                if (level != 3)
                {
                    /* levels 0-2 use block descriptors (bits [1:0] = 0b01) */
                    *pte = phys | (attr | 0x1UL);
                }
                else
                {
                    /* level 3 uses page descriptors (bits [1:0] = 0b11) */
                    *pte = phys | (attr | 0x3UL);
                }
                addr += block_size;
                phys += block_size;
                size -= block_size;
                break;
            }

            /* chunk too small/misaligned for this level: descend */
            table = get_level_table(pte);
        }
    }
}
/* Public wrapper: map [va, va + size) to [pa, pa + size) with the given
 * descriptor attributes. */
void armv8_map(unsigned long va, unsigned long pa, unsigned long size, unsigned long attr)
{
    map_region(va, pa, size, attr);
}
/* Enable the data cache (SCTLR_EL1.C). Refuses when the MMU is off, since
 * cacheability requires valid translation tables. */
void rt_hw_dcache_enable(void)
{
    if (!(get_sctlr() & CR_M))
    {
        rt_kprintf("please init mmu!\n");
        return;
    }

    set_sctlr(get_sctlr() | CR_C);
}
/* Clean+invalidate the entire data cache hierarchy, including L3, and
 * report the outcome via rt_kprintf. */
void rt_hw_dcache_flush_all(void)
{
    int rc;

    __asm_flush_dcache_all();

    rc = __asm_flush_l3_cache();
    if (rc)
    {
        rt_kprintf("flushing dcache returns 0x%x\n", rc);
    }
    else
    {
        rt_kprintf("flushing dcache successfully.\n");
    }
}
/* Clean+invalidate the data cache over [start_addr, start_addr + size). */
void rt_hw_dcache_flush_range(unsigned long start_addr, unsigned long size)
{
    __asm_flush_dcache_range(start_addr, start_addr + size);
}
void
rt_hw_dcache_invalidate_range
(
unsigned
long
start_addr
,
unsigned
long
size
)
{
__asm_flush_dcache_range
(
start_addr
,
start_addr
+
size
);
}
/* Clean and invalidate the data cache over the given address range. */
void rt_hw_cpu_dcache_clean_inv(unsigned long start_addr, unsigned long size)
{
    __asm_flush_dcache_range(start_addr, start_addr + size);
}
/* Pointer/length variant of the range invalidate, for generic callers. */
void rt_hw_cpu_dcache_invalidate(void *start_addr, int size)
{
    rt_hw_dcache_invalidate_range((unsigned long)start_addr, (unsigned long)size);
}
/* Invalidate the whole data cache without cleaning. */
void rt_hw_dcache_invalidate_all(void)
{
    __asm_invalidate_dcache_all();
}
/* Disable the data cache (clear SCTLR_EL1.C). */
void rt_hw_dcache_disable(void)
{
    /* if cache isn't enabled no need to disable */
    if (!(get_sctlr() & CR_C))
    {
        rt_kprintf("need enable cache!\n");
        return;
    }

    set_sctlr(get_sctlr() & ~CR_C);
}
//icache
void
rt_hw_icache_enable
(
void
)
{
__asm_invalidate_icache_all
();
set_sctlr
(
get_sctlr
()
|
CR_I
);
}
/* Invalidate the whole instruction cache. */
void rt_hw_icache_invalidate_all(void)
{
    __asm_invalidate_icache_all();
}
/* Disable the instruction cache (clear SCTLR_EL1.I). */
void rt_hw_icache_disable(void)
{
    set_sctlr(get_sctlr() & ~CR_I);
}
bsp/nuvoton/libraries/ma35/libcpu/aarch64/mmu.h
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-02-20 bigmagic first version
*/
#ifndef __MMU_H__
#define __MMU_H__
/*
* CR1 bits (CP#15 CR1)
*/
#define CR_M (1 << 0)
/* MMU enable */
#define CR_A (1 << 1)
/* Alignment abort enable */
#define CR_C (1 << 2)
/* Dcache enable */
#define CR_W (1 << 3)
/* Write buffer enable */
#define CR_P (1 << 4)
/* 32-bit exception handler */
#define CR_D (1 << 5)
/* 32-bit data address range */
#define CR_L (1 << 6)
/* Implementation defined */
#define CR_B (1 << 7)
/* Big endian */
#define CR_S (1 << 8)
/* System MMU protection */
#define CR_R (1 << 9)
/* ROM MMU protection */
#define CR_F (1 << 10)
/* Implementation defined */
#define CR_Z (1 << 11)
/* Implementation defined */
#define CR_I (1 << 12)
/* Icache enable */
#define CR_V (1 << 13)
/* Vectors relocated to 0xffff0000 */
#define CR_RR (1 << 14)
/* Round Robin cache replacement */
#define CR_L4 (1 << 15)
/* LDR pc can set T bit */
#define CR_DT (1 << 16)
#define CR_IT (1 << 18)
#define CR_ST (1 << 19)
#define CR_FI (1 << 21)
/* Fast interrupt (lower latency mode) */
#define CR_U (1 << 22)
/* Unaligned access operation */
#define CR_XP (1 << 23)
/* Extended page tables */
#define CR_VE (1 << 24)
/* Vectored interrupts */
#define CR_EE (1 << 25)
/* Exception (Big) Endian */
#define CR_TRE (1 << 28)
/* TEX remap enable */
#define CR_AFE (1 << 29)
/* Access flag enable */
#define CR_TE (1 << 30)
/* Thumb exception enable */
#define MMU_LEVEL_MASK 0x1ffUL
#define MMU_MAP_ERROR_VANOTALIGN -1
#define MMU_MAP_ERROR_PANOTALIGN -2
#define MMU_MAP_ERROR_NOPAGE -3
#define MMU_MAP_ERROR_CONFLICT -4
#define MEM_ATTR_MEMORY ((0x1UL << 10) | (0x2UL << 8) | (0x0UL << 6) | (0x1UL << 2))
#define MEM_ATTR_IO ((0x1UL << 10) | (0x2UL << 8) | (0x0UL << 6) | (0x2UL << 2))
#define BUS_ADDRESS(phys) (((phys) & ~0xC0000000) | 0xC0000000)
void
mmu_init
(
void
);
void
mmu_enable
(
void
);
int
armv8_map_2M
(
unsigned
long
va
,
unsigned
long
pa
,
int
count
,
unsigned
long
attr
);
void
armv8_map
(
unsigned
long
va
,
unsigned
long
pa
,
unsigned
long
size
,
unsigned
long
attr
);
//dcache
void
rt_hw_dcache_enable
(
void
);
void
rt_hw_dcache_flush_all
(
void
);
void
rt_hw_dcache_flush_range
(
unsigned
long
start_addr
,
unsigned
long
size
);
void
rt_hw_dcache_invalidate_range
(
unsigned
long
start_addr
,
unsigned
long
size
);
void
rt_hw_dcache_invalidate_all
(
void
);
void
rt_hw_dcache_disable
(
void
);
//icache
void
rt_hw_icache_enable
(
void
);
void
rt_hw_icache_invalidate_all
(
void
);
void
rt_hw_icache_disable
(
void
);
#endif
/*__MMU_H__*/
bsp/nuvoton/libraries/ma35/libcpu/aarch64/secondary_cpu.c
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Date Author Notes
* 2021-06-17 Wayne the first version
*/
#include <rtthread.h>
#include "board.h"
#include "gic.h"
#include "cp15.h"
#ifdef RT_USING_SMP
#include "interrupt.h"
extern
int
rt_hw_timer_init
(
void
);
extern
void
secondary_cpu_start
(
void
);
/*
    For core-1, core-2 and core-3.

    Pushes caches so the secondary cores see a coherent image, then
    kicks each core with SEV + a scheduler IPI.
*/
void rt_hw_secondary_cpu_up(void)
{
    rt_uint32_t cpu_mask = 2;   /* bit 1 = core-1; shifted once per loop pass */
    rt_int32_t i;

    /* Make the boot image visible to the (still cache-cold) secondary cores. */
    rt_cpu_dcache_clean_flush();
    rt_cpu_icache_flush();

    rt_kprintf("rt_hw_secondary_cpu_up is processing\r\n");

    /* BUG FIX: the original condition was `i < RT_CPUS_NR, i != 4` — the
     * comma operator discards the first comparison, so only `i != 4` was
     * tested and RT_CPUS_NR never bounded the loop. Combine with &&. */
    for (i = 1; (i < RT_CPUS_NR) && (i != 4); i++)
    {
        rt_kprintf("Boot Core-%d\n", i);

        //FPsci_CpuOn(1 << i, (rt_uint32_t)secondary_cpu_start);

        /* NOTE(review): cpu_mask is shifted *before* the IPI send, so the
         * first iteration targets mask 4 (core-2), not core-1 — confirm the
         * intended wake-up order against the boot assembly. Behavior kept. */
        cpu_mask <<= 1;

        __SEV();    /* wake cores parked in WFE */
        __DSB();
        __ISB();
        __DSB();

        rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
    }
}
/*
 * C entry point for a secondary core, reached from the assembly boot
 * stub (secondary_cpu_start). Brings the core's private state up in
 * strict order, then hands control to the scheduler (never returns).
 */
void
secondary_cpu_c_start
(
void
)
{
uint32_t
id
=
rt_hw_cpu_id
();
rt_kprintf
(
"cpu = 0x%08x\
\n\
"
,
id
);
/* per-core generic timer for the scheduler tick */
rt_hw_timer_init
();
/* initialize vector table */
rt_hw_vector_init
();
/* serialize with the primary core before joining scheduling */
rt_hw_spin_lock
(
&
_cpus_lock
);
/* accept cross-core scheduling IPIs on this core
 * (priority 16 — presumably matches the primary core's setting; verify) */
rt_hw_interrupt_set_priority
(
RT_SCHEDULE_IPI
,
16
);
rt_hw_interrupt_umask
(
RT_SCHEDULE_IPI
);
/* enter the scheduler; does not return */
rt_system_scheduler_start
();
}
/*
 * Body of the idle loop on secondary cores: park the core in a
 * low-power wait until an event (e.g. SEV from another core or an
 * interrupt) wakes it. The "memory"/"cc" clobbers stop the compiler
 * from caching memory state across the wait.
 */
void
rt_hw_secondary_cpu_idle_exec
(
void
)
{
asm
volatile
(
"wfe"
::
:
"memory"
,
"cc"
);
}
#endif
bsp/nuvoton/libraries/ma35/libcpu/aarch64/stack.c
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-23 Bernard the first version
* 2011-10-05 Bernard add thumb mode
*/
#include <rtthread.h>
#include <board.h>
#include <armv8.h>
#define INITIAL_SPSR_EL3 (PSTATE_EL3 | SP_EL0)
#define INITIAL_SPSR_EL2 (PSTATE_EL2 | SP_EL0)
#define INITIAL_SPSR_EL1 (PSTATE_EL1 | SP_EL0)
/**
 * This function will initialize thread stack
 *
 * Lays down a fake exception frame so that the first context switch
 * "returns" into the thread entry. The push order below must exactly
 * mirror the register-restore order in the context-switch assembly
 * (context_gcc.S) — do not reorder. Non-argument registers are seeded
 * with recognizable dummy values (X5 = 55, X19 = 19, ...) to make
 * stack frames easy to identify in a debugger.
 *
 * @param tentry the entry of thread
 * @param parameter the parameter of entry
 * @param stack_addr the beginning stack address
 * @param texit the function will be called when thread exit
 *
 * @return stack address
 */
rt_uint8_t
*
rt_hw_stack_init
(
void
*
tentry
,
void
*
parameter
,
rt_uint8_t
*
stack_addr
,
void
*
texit
)
{
rt_ubase_t
*
stk
;
rt_ubase_t
current_el
;
/* NOTE(review): stack_addr is used as-is; AArch64 requires SP to be
 * 16-byte aligned — presumably the caller guarantees this. Verify. */
stk
=
(
rt_ubase_t
*
)
stack_addr
;
/* Registers are pushed in pairs (odd register first) to match the
 * paired LDP restores in the switch code. */
*
(
--
stk
)
=
(
rt_ubase_t
)
11
;
/* X1 */
*
(
--
stk
)
=
(
rt_ubase_t
)
parameter
;
/* X0 - first argument passed to tentry */
*
(
--
stk
)
=
(
rt_ubase_t
)
33
;
/* X3 */
*
(
--
stk
)
=
(
rt_ubase_t
)
22
;
/* X2 */
*
(
--
stk
)
=
(
rt_ubase_t
)
55
;
/* X5 */
*
(
--
stk
)
=
(
rt_ubase_t
)
44
;
/* X4 */
*
(
--
stk
)
=
(
rt_ubase_t
)
77
;
/* X7 */
*
(
--
stk
)
=
(
rt_ubase_t
)
66
;
/* X6 */
*
(
--
stk
)
=
(
rt_ubase_t
)
99
;
/* X9 */
*
(
--
stk
)
=
(
rt_ubase_t
)
88
;
/* X8 */
*
(
--
stk
)
=
(
rt_ubase_t
)
11
;
/* X11 */
*
(
--
stk
)
=
(
rt_ubase_t
)
10
;
/* X10 */
*
(
--
stk
)
=
(
rt_ubase_t
)
13
;
/* X13 */
*
(
--
stk
)
=
(
rt_ubase_t
)
12
;
/* X12 */
*
(
--
stk
)
=
(
rt_ubase_t
)
15
;
/* X15 */
*
(
--
stk
)
=
(
rt_ubase_t
)
14
;
/* X14 */
*
(
--
stk
)
=
(
rt_ubase_t
)
17
;
/* X17 */
*
(
--
stk
)
=
(
rt_ubase_t
)
16
;
/* X16 */
*
(
--
stk
)
=
(
rt_ubase_t
)
19
;
/* X19 */
*
(
--
stk
)
=
(
rt_ubase_t
)
18
;
/* X18 */
*
(
--
stk
)
=
(
rt_ubase_t
)
21
;
/* X21 */
*
(
--
stk
)
=
(
rt_ubase_t
)
20
;
/* X20 */
*
(
--
stk
)
=
(
rt_ubase_t
)
23
;
/* X23 */
*
(
--
stk
)
=
(
rt_ubase_t
)
22
;
/* X22 */
*
(
--
stk
)
=
(
rt_ubase_t
)
25
;
/* X25 */
*
(
--
stk
)
=
(
rt_ubase_t
)
24
;
/* X24 */
*
(
--
stk
)
=
(
rt_ubase_t
)
27
;
/* X27 */
*
(
--
stk
)
=
(
rt_ubase_t
)
26
;
/* X26 */
*
(
--
stk
)
=
(
rt_ubase_t
)
29
;
/* X29 */
*
(
--
stk
)
=
(
rt_ubase_t
)
28
;
/* X28 */
*
(
--
stk
)
=
(
rt_ubase_t
)
0
;
/* XZR - has no effect, used so there are an even number of registers. */
*
(
--
stk
)
=
(
rt_ubase_t
)
texit
;
/* X30 - procedure call link register: a returning thread falls into texit. */
/* Choose the saved PSTATE for the current exception level so the
 * ERET in the restore path stays at the EL we are running at. */
current_el
=
rt_hw_get_current_el
();
if
(
current_el
==
3
)
{
*
(
--
stk
)
=
INITIAL_SPSR_EL3
;
}
else
if
(
current_el
==
2
)
{
*
(
--
stk
)
=
INITIAL_SPSR_EL2
;
}
else
{
*
(
--
stk
)
=
INITIAL_SPSR_EL1
;
}
*
(
--
stk
)
=
(
rt_ubase_t
)
tentry
;
/* Exception return address. */
/* return task's current stack address */
return
(
rt_uint8_t
*
)
stk
;
}
bsp/nuvoton/libraries/ma35/libcpu/aarch64/trap.c
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Date Author Notes
* 2018-10-06 ZhaoXiaowei the first version
*/
#include <rtthread.h>
#include <rthw.h>
#include "interrupt.h"
#include "armv8.h"
extern
struct
rt_thread
*
rt_current_thread
;
#ifdef RT_USING_FINSH
extern
long
list_thread
(
void
);
#endif
/**
 * this function will show registers of CPU
 *
 * Dumps the full AArch64 general-purpose register frame (x0-x30),
 * the saved PSTATE and the exception return address to the console.
 *
 * @param regs the registers point
 */
void rt_hw_show_register(struct rt_hw_exp_stack *regs)
{
    /* FIX: corrected user-visible typo — was "Execption:" */
    rt_kprintf("Exception:\n");
    rt_kprintf("r00:0x%16.16lx r01:0x%16.16lx r02:0x%16.16lx r03:0x%16.16lx\n",
               regs->x0, regs->x1, regs->x2, regs->x3);
    rt_kprintf("r04:0x%16.16lx r05:0x%16.16lx r06:0x%16.16lx r07:0x%16.16lx\n",
               regs->x4, regs->x5, regs->x6, regs->x7);
    rt_kprintf("r08:0x%16.16lx r09:0x%16.16lx r10:0x%16.16lx r11:0x%16.16lx\n",
               regs->x8, regs->x9, regs->x10, regs->x11);
    rt_kprintf("r12:0x%16.16lx r13:0x%16.16lx r14:0x%16.16lx r15:0x%16.16lx\n",
               regs->x12, regs->x13, regs->x14, regs->x15);
    rt_kprintf("r16:0x%16.16lx r17:0x%16.16lx r18:0x%16.16lx r19:0x%16.16lx\n",
               regs->x16, regs->x17, regs->x18, regs->x19);
    rt_kprintf("r20:0x%16.16lx r21:0x%16.16lx r22:0x%16.16lx r23:0x%16.16lx\n",
               regs->x20, regs->x21, regs->x22, regs->x23);
    rt_kprintf("r24:0x%16.16lx r25:0x%16.16lx r26:0x%16.16lx r27:0x%16.16lx\n",
               regs->x24, regs->x25, regs->x26, regs->x27);
    rt_kprintf("r28:0x%16.16lx r29:0x%16.16lx r30:0x%16.16lx\n",
               regs->x28, regs->x29, regs->x30);
    rt_kprintf("spsr:0x%16.16lx\n", regs->spsr);
    rt_kprintf("return pc:0x%16.16lx\n", regs->pc);
}
/**
 * When comes across an instruction which it cannot handle,
 * it takes the undefined instruction trap.
 *
 * Prints the saved register frame, optionally the thread list, then
 * halts the system — this handler does not return to the faulting code.
 *
 * @param regs system registers
 *
 * @note never invoke this function in application
 */
void
rt_hw_trap_error
(
struct
rt_hw_exp_stack
*
regs
)
{
rt_kprintf
(
"error exception:\
\n\
"
);
rt_hw_show_register
(
regs
);
#ifdef RT_USING_FINSH
/* dump the thread list to help post-mortem analysis */
list_thread
();
#endif
/* fatal: stop the CPU */
rt_hw_cpu_shutdown
();
}
/* Low 10 bits of the GIC acknowledge register hold the interrupt ID.
 * NOTE(review): this mask is defined but never applied below —
 * presumably rt_hw_interrupt_get_irq() already strips the CPU-ID
 * field; confirm, otherwise `ir` should be masked before indexing
 * isr_table. */
#define GIC_ACK_INTID_MASK 0x000003ff
/* IRQ dispatcher, entered from the vector table: acknowledge the
 * active interrupt, run its registered handler, then signal EOI. */
void
rt_hw_trap_irq
(
void
)
{
void
*
param
;
int
ir
;
rt_isr_handler_t
isr_func
;
extern
struct
rt_irq_desc
isr_table
[];
ir
=
rt_hw_interrupt_get_irq
();
if
(
ir
==
1023
)
{
/* Spurious interrupt (GIC INTID 1023): no EOI must be written. */
return
;
}
/* get interrupt service routine */
isr_func
=
isr_table
[
ir
].
handler
;
#ifdef RT_USING_INTERRUPT_INFO
/* per-IRQ statistics */
isr_table
[
ir
].
counter
++
;
#endif
if
(
isr_func
)
{
/* Interrupt for myself. */
param
=
isr_table
[
ir
].
param
;
/* turn to interrupt service routine */
isr_func
(
ir
,
param
);
}
/* end of interrupt — EOI is written after the handler completes */
rt_hw_interrupt_ack
(
ir
);
}
void
rt_hw_trap_fiq
(
void
)
{
void
*
param
;
int
ir
;
rt_isr_handler_t
isr_func
;
extern
struct
rt_irq_desc
isr_table
[];
ir
=
rt_hw_interrupt_get_irq
();
/* get interrupt service routine */
isr_func
=
isr_table
[
ir
].
handler
;
if
(
isr_func
)
{
param
=
isr_table
[
ir
].
param
;
/* turn to interrupt service routine */
isr_func
(
ir
,
param
);
}
/* end of interrupt */
rt_hw_interrupt_ack
(
ir
);
}
bsp/nuvoton/libraries/ma35/libcpu/aarch64/vector_gcc.S
已删除
100644 → 0
浏览文件 @
d0b70ab7
/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Date           Author       Notes
 * 2018-10-06     ZhaoXiaowei  the first version
 */

/*
 * AArch64 exception vector table. Must be 2 KiB aligned (.align 11)
 * because VBAR_ELx ignores the low 11 bits; each of the 16 slots is
 * 0x80 bytes apart, placed with .org. Only the CurrentEL IRQ/FIQ
 * slots dispatch to real handlers; everything else funnels into
 * vector_error.
 */
.text
.globl system_vectors
.globl vector_error
.globl vector_irq
.globl vector_fiq

system_vectors:
.align 11
    .set    VBAR, system_vectors
    .org    VBAR

    // Exception from CurrentEL (EL1) with SP_EL0 (SPSEL = 1)
    .org (VBAR + 0x00 + 0)
    B vector_error      // Synchronous
    .org (VBAR + 0x80 + 0)
    B vector_irq        // IRQ/vIRQ
    .org (VBAR + 0x100 + 0)
    B vector_fiq        // FIQ/vFIQ
    .org (VBAR + 0x180 + 0)
    B vector_error      // Error/vError

    // Exception from CurrentEL (EL1) with SP_ELn
    .org (VBAR + 0x200 + 0)
    B vector_error      // Synchronous
    .org (VBAR + 0x280 + 0)
    B vector_irq        // IRQ/vIRQ
    .org (VBAR + 0x300 + 0)
    B vector_fiq        // FIQ/vFIQ
    .org (VBAR + 0x380 + 0)
    B vector_error

    // Exception from lower EL, aarch64 — all treated as fatal
    .org (VBAR + 0x400 + 0)
    B vector_error
    .org (VBAR + 0x480 + 0)
    B vector_error
    .org (VBAR + 0x500 + 0)
    B vector_error
    .org (VBAR + 0x580 + 0)
    B vector_error

    // Exception from lower EL, aarch32 — all treated as fatal
    .org (VBAR + 0x600 + 0)
    B vector_error
    .org (VBAR + 0x680 + 0)
    B vector_error
    .org (VBAR + 0x700 + 0)
    B vector_error
    .org (VBAR + 0x780 + 0)
    B vector_error

    // NOTE(review): 0x800 lies one slot past the architectural
    // 16-entry table (0x000-0x780); this looks like a guard/padding
    // entry — confirm against the full original file.
    .org (VBAR + 0x800 + 0)
    B vector_error
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录