openeuler / raspberrypi-kernel

Commit ceaa1a13
Authored 12 years ago by Russell King
Merge branch 'arch-timers' into for-linus
Conflicts:
	arch/arm/include/asm/timex.h
	arch/arm/lib/delay.c
Parents: ba4a63f8, 56942fec
Showing 5 changed files with 316 additions and 125 deletions (+316 −125)
arch/arm/include/asm/arch_timer.h	+7 −1
arch/arm/include/asm/delay.h		+9 −0
arch/arm/include/asm/timex.h		+1 −5
arch/arm/kernel/arch_timer.c		+273 −110
arch/arm/lib/delay.c			+26 −9
arch/arm/include/asm/arch_timer.h
@@ -2,11 +2,12 @@
 #define __ASMARM_ARCH_TIMER_H
 
 #include <asm/errno.h>
+#include <linux/clocksource.h>
 
 #ifdef CONFIG_ARM_ARCH_TIMER
-#define ARCH_HAS_READ_CURRENT_TIMER
 int arch_timer_of_register(void);
 int arch_timer_sched_clock_init(void);
+struct timecounter *arch_timer_get_timecounter(void);
 #else
 static inline int arch_timer_of_register(void)
 {
@@ -17,6 +18,11 @@ static inline int arch_timer_sched_clock_init(void)
 {
 	return -ENXIO;
 }
 
+static inline struct timecounter *arch_timer_get_timecounter(void)
+{
+	return NULL;
+}
+
 #endif
 
 #endif
arch/arm/include/asm/delay.h
@@ -15,6 +15,11 @@
 
 #ifndef __ASSEMBLY__
 
+struct delay_timer {
+	unsigned long (*read_current_timer)(void);
+	unsigned long freq;
+};
+
 extern struct arm_delay_ops {
 	void (*delay)(unsigned long);
 	void (*const_udelay)(unsigned long);
@@ -56,6 +61,10 @@ extern void __loop_delay(unsigned long loops);
 extern void __loop_udelay(unsigned long usecs);
 extern void __loop_const_udelay(unsigned long);
 
+/* Delay-loop timer registration. */
+#define ARCH_HAS_READ_CURRENT_TIMER
+extern void register_current_timer_delay(const struct delay_timer *timer);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* defined(_ARM_DELAY_H) */
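The new struct delay_timer pairs a counter-read callback with its tick frequency, and register_current_timer_delay() retargets the arm_delay_ops function-pointer table so udelay() and friends dispatch to a timer-backed implementation instead of the calibrated busy loop. A minimal standalone sketch of that dispatch pattern (hypothetical names, user-space C, not kernel code):

    #include <stdio.h>

    struct ops {
            void (*delay)(unsigned long loops);
    };

    static void loop_delay(unsigned long loops)  { printf("loop delay %lu\n", loops); }
    static void timer_delay(unsigned long loops) { printf("timer delay %lu\n", loops); }

    static struct ops delay_ops = { .delay = loop_delay };

    int main(void)
    {
            delay_ops.delay(100);           /* early boot: busy loop */
            delay_ops.delay = timer_delay;  /* a timer driver registers itself */
            delay_ops.delay(100);           /* same call site, new backend */
            return 0;
    }

The call sites never change; only the table entries do, which is why the switch-over can safely happen once at boot.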
arch/arm/include/asm/timex.h
@@ -12,13 +12,9 @@
 #ifndef _ASMARM_TIMEX_H
 #define _ASMARM_TIMEX_H
 
-#include <asm/arch_timer.h>
 #include <mach/timex.h>
 
-#ifdef ARCH_HAS_READ_CURRENT_TIMER
 typedef unsigned long cycles_t;
 #define get_cycles()	({ cycles_t c; read_current_timer(&c) ? 0 : c; })
-#endif
 
-#include <asm-generic/timex.h>
 #endif
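The surviving get_cycles() definition is a GNU statement expression: it evaluates to 0 whenever read_current_timer() fails (no delay timer registered yet), and to the counter value otherwise. A self-contained sketch of those semantics (stubbed reader, GCC/clang only, not the kernel implementation):

    #include <stdio.h>

    typedef unsigned long cycles_t;

    static int have_timer;                 /* assumption: set on registration */

    static int read_current_timer(cycles_t *c)
    {
            if (!have_timer)
                    return -1;             /* stands in for -ENXIO */
            *c = 12345;                    /* stands in for a hardware read */
            return 0;
    }

    #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })

    int main(void)
    {
            printf("%lu\n", get_cycles()); /* 0: no timer registered yet */
            have_timer = 1;
            printf("%lu\n", get_cycles()); /* 12345 */
            return 0;
    }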
arch/arm/kernel/arch_timer.c
@@ -21,18 +21,28 @@
 #include <linux/io.h>
 
 #include <asm/cputype.h>
+#include <asm/delay.h>
 #include <asm/localtimer.h>
 #include <asm/arch_timer.h>
 #include <asm/system_info.h>
 #include <asm/sched_clock.h>
 
 static unsigned long arch_timer_rate;
-static int arch_timer_ppi;
-static int arch_timer_ppi2;
+
+enum ppi_nr {
+	PHYS_SECURE_PPI,
+	PHYS_NONSECURE_PPI,
+	VIRT_PPI,
+	HYP_PPI,
+	MAX_TIMER_PPI
+};
+
+static int arch_timer_ppi[MAX_TIMER_PPI];
 
 static struct clock_event_device __percpu **arch_timer_evt;
+static struct delay_timer arch_delay_timer;
 
-extern void init_current_timer_delay(unsigned long freq);
+static bool arch_timer_use_virtual = true;
 
 /*
  * Architected system timer support.
@@ -46,50 +56,104 @@ extern void init_current_timer_delay(unsigned long freq);
 #define ARCH_TIMER_REG_FREQ		1
 #define ARCH_TIMER_REG_TVAL		2
 
-static void arch_timer_reg_write(int reg, u32 val)
+#define ARCH_TIMER_PHYS_ACCESS		0
+#define ARCH_TIMER_VIRT_ACCESS		1
+
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code. At least it does so with a recent GCC (4.6.3).
+ */
+static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
 {
-	switch (reg) {
-	case ARCH_TIMER_REG_CTRL:
-		asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
-		break;
-	case ARCH_TIMER_REG_TVAL:
-		asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
-		break;
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+			break;
+		}
+	}
+
+	if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
+			break;
+		}
 	}
 
 	isb();
 }
 
-static u32 arch_timer_reg_read(int reg)
+static inline u32 arch_timer_reg_read(const int access, const int reg)
 {
-	u32 val;
+	u32 val = 0;
+
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_FREQ:
+			asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
+			break;
+		}
+	}
 
-	switch (reg) {
-	case ARCH_TIMER_REG_CTRL:
-		asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
-		break;
-	case ARCH_TIMER_REG_FREQ:
-		asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
-		break;
-	case ARCH_TIMER_REG_TVAL:
-		asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
-		break;
-	default:
-		BUG();
+	if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
+			break;
+		}
 	}
 
 	return val;
 }
 
-static irqreturn_t arch_timer_handler(int irq, void *dev_id)
+static inline cycle_t arch_timer_counter_read(const int access)
 {
-	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
-	unsigned long ctrl;
+	cycle_t cval = 0;
+
+	if (access == ARCH_TIMER_PHYS_ACCESS)
+		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+
+	if (access == ARCH_TIMER_VIRT_ACCESS)
+		asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
+
+	return cval;
+}
+
+static inline cycle_t arch_counter_get_cntpct(void)
+{
+	return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS);
+}
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+static inline cycle_t arch_counter_get_cntvct(void)
+{
+	return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS);
+}
+
+static irqreturn_t inline timer_handler(const int access,
+					struct clock_event_device *evt)
+{
+	unsigned long ctrl;
+
+	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
 	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
 		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
-		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
 		evt->event_handler(evt);
 		return IRQ_HANDLED;
 	}
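The comment in this hunk explains the design: because the accessors are inline and take a const access selector, each call site passes a compile-time constant, so constant propagation lets the compiler keep only the matching mcr/mrc encoding and discard the other branch. A standalone illustration of the idiom (hypothetical register values, not the kernel code):

    #define PHYS_ACCESS 0
    #define VIRT_ACCESS 1

    static inline unsigned int reg_read(const int access)
    {
            unsigned int val = 0;

            if (access == PHYS_ACCESS)
                    val = 1;        /* would be the c14, c2 coprocessor read */
            if (access == VIRT_ACCESS)
                    val = 2;        /* would be the c14, c3 coprocessor read */
            return val;
    }

    unsigned int read_phys(void)
    {
            /* after inlining, only the PHYS branch survives as code */
            return reg_read(PHYS_ACCESS);
    }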
@@ -97,63 +161,100 @@ static irqreturn_t arch_timer_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static void arch_timer_disable(void)
+static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
 {
-	unsigned long ctrl;
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
-	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
-	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
 }
 
-static void arch_timer_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *clk)
+static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+
+	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
+}
+
+static inline void timer_set_mode(const int access, int mode)
 {
 	unsigned long ctrl;
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_UNUSED:
 	case CLOCK_EVT_MODE_SHUTDOWN:
-		arch_timer_disable();
+		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
 		break;
 	default:
 		break;
 	}
 }
 
-static int arch_timer_set_next_event(unsigned long evt,
-				     struct clock_event_device *unused)
+static void arch_timer_set_mode_virt(enum clock_event_mode mode,
+				     struct clock_event_device *clk)
 {
-	unsigned long ctrl;
+	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
+}
 
-	ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
+static void arch_timer_set_mode_phys(enum clock_event_mode mode,
+				     struct clock_event_device *clk)
+{
+	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
+}
+
+static inline void set_next_event(const int access, unsigned long evt)
+{
+	unsigned long ctrl;
+
+	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
 	ctrl |= ARCH_TIMER_CTRL_ENABLE;
 	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
+	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+}
 
-	arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
-	arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
+static int arch_timer_set_next_event_virt(unsigned long evt,
+					  struct clock_event_device *unused)
+{
+	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
+	return 0;
+}
 
+static int arch_timer_set_next_event_phys(unsigned long evt,
+					  struct clock_event_device *unused)
+{
+	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
 	return 0;
 }
 
 static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
 {
-	/* Be safe... */
-	arch_timer_disable();
-
 	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
 	clk->name = "arch_sys_timer";
 	clk->rating = 450;
-	clk->set_mode = arch_timer_set_mode;
-	clk->set_next_event = arch_timer_set_next_event;
-	clk->irq = arch_timer_ppi;
+	if (arch_timer_use_virtual) {
+		clk->irq = arch_timer_ppi[VIRT_PPI];
+		clk->set_mode = arch_timer_set_mode_virt;
+		clk->set_next_event = arch_timer_set_next_event_virt;
+	} else {
+		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+		clk->set_mode = arch_timer_set_mode_phys;
+		clk->set_next_event = arch_timer_set_next_event_phys;
+	}
+
+	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
 
 	clockevents_config_and_register(clk, arch_timer_rate,
 					0xf, 0x7fffffff);
 
 	*__this_cpu_ptr(arch_timer_evt) = clk;
 
-	enable_percpu_irq(clk->irq, 0);
-	if (arch_timer_ppi2)
-		enable_percpu_irq(arch_timer_ppi2, 0);
+	if (arch_timer_use_virtual)
+		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
+	else {
+		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
+		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
+	}
 
 	return 0;
 }
@@ -173,8 +274,8 @@ static int arch_timer_available(void)
 		return -ENXIO;
 
 	if (arch_timer_rate == 0) {
-		arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
-		freq = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
+		freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS,
+					   ARCH_TIMER_REG_FREQ);
 
 		/* Check the timer frequency. */
 		if (freq == 0) {
@@ -185,52 +286,57 @@ static int arch_timer_available(void)
 		arch_timer_rate = freq;
 	}
 
-	pr_info_once("Architected local timer running at %lu.%02luMHz.\n",
-		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
+	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
+		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100,
+		     arch_timer_use_virtual ? "virt" : "phys");
+
 	return 0;
 }
 
-static inline cycle_t arch_counter_get_cntpct(void)
+static u32 notrace arch_counter_get_cntpct32(void)
 {
-	u32 cvall, cvalh;
+	cycle_t cnt = arch_counter_get_cntpct();
 
-	asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
-
-	return ((cycle_t) cvalh << 32) | cvall;
-}
-
-static inline cycle_t arch_counter_get_cntvct(void)
-{
-	u32 cvall, cvalh;
-
-	asm volatile("mrrc p15, 1, %0, %1, c14" : "=r" (cvall), "=r" (cvalh));
-
-	return ((cycle_t) cvalh << 32) | cvall;
+	/*
+	 * The sched_clock infrastructure only knows about counters
+	 * with at most 32bits. Forget about the upper 24 bits for the
+	 * time being...
+	 */
+	return (u32)cnt;
 }
 
 static u32 notrace arch_counter_get_cntvct32(void)
 {
-	cycle_t cntvct = arch_counter_get_cntvct();
+	cycle_t cnt = arch_counter_get_cntvct();
 
 	/*
 	 * The sched_clock infrastructure only knows about counters
 	 * with at most 32bits. Forget about the upper 24 bits for the
 	 * time being...
 	 */
-	return (u32)(cntvct & (u32)~0);
+	return (u32)cnt;
 }
 
 static cycle_t arch_counter_read(struct clocksource *cs)
 {
+	/*
+	 * Always use the physical counter for the clocksource.
+	 * CNTHCTL.PL1PCTEN must be set to 1.
+	 */
 	return arch_counter_get_cntpct();
 }
 
-int read_current_timer(unsigned long *timer_val)
+static unsigned long arch_timer_read_current_timer(void)
 {
-	if (!arch_timer_rate)
-		return -ENXIO;
-	*timer_val = arch_counter_get_cntpct();
-	return 0;
+	return arch_counter_get_cntpct();
+}
+
+static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+{
+	/*
+	 * Always use the physical counter for the clocksource.
+	 * CNTHCTL.PL1PCTEN must be set to 1.
+	 */
+	return arch_counter_get_cntpct();
 }
 
 static struct clocksource clocksource_counter = {
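The truncation the in-code comment describes is safe for deltas: dropping the upper 24 bits of the 56-bit counter still yields correct differences in modular 32-bit arithmetic, provided fewer than 2^32 ticks elapse between samples. A small worked example (illustrative only, plain C):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t before = 0x00FFFFFFFFFFFFF0ULL;  /* near the 56-bit top */
            uint64_t after  = before + 0x20;

            uint32_t b32 = (uint32_t)before;          /* upper 24 bits dropped */
            uint32_t a32 = (uint32_t)after;

            /* unsigned wraparound still gives the true delta of 0x20 */
            printf("delta = 0x%x\n", a32 - b32);
            return 0;
    }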
@@ -241,14 +347,32 @@ static struct clocksource clocksource_counter = {
 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static struct cyclecounter cyclecounter = {
+	.read	= arch_counter_read_cc,
+	.mask	= CLOCKSOURCE_MASK(56),
+};
+
+static struct timecounter timecounter;
+
+struct timecounter *arch_timer_get_timecounter(void)
+{
+	return &timecounter;
+}
+
 static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
 {
 	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
 		 clk->irq, smp_processor_id());
-	disable_percpu_irq(clk->irq);
-	if (arch_timer_ppi2)
-		disable_percpu_irq(arch_timer_ppi2);
-	arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+
+	if (arch_timer_use_virtual)
+		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
+	else {
+		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
+		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
+	}
+
+	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
 }
 
 static struct local_timer_ops arch_timer_ops __cpuinitdata = {
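arch_timer_get_timecounter() exposes the 56-bit physical counter through the generic cyclecounter/timecounter layer, reusing the mult/shift pair computed for the clocksource. A hedged sketch of how a consumer might read elapsed nanoseconds, assuming the in-kernel timecounter API of this era (the caller shown is hypothetical):

    #include <linux/clocksource.h>
    #include <asm/arch_timer.h>

    static u64 example_elapsed_ns(void)
    {
            struct timecounter *tc = arch_timer_get_timecounter();

            /* converts (cycle_now - cycle_last) to ns and accumulates */
            return timecounter_read(tc);
    }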
@@ -261,36 +385,48 @@ static struct clock_event_device arch_timer_global_evt;
 static int __init arch_timer_register(void)
 {
 	int err;
+	int ppi;
 
 	err = arch_timer_available();
 	if (err)
-		return err;
+		goto out;
 
 	arch_timer_evt = alloc_percpu(struct clock_event_device *);
-	if (!arch_timer_evt)
-		return -ENOMEM;
+	if (!arch_timer_evt) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+	cyclecounter.mult = clocksource_counter.mult;
+	cyclecounter.shift = clocksource_counter.shift;
+	timecounter_init(&timecounter, &cyclecounter,
+			 arch_counter_get_cntpct());
+
+	if (arch_timer_use_virtual) {
+		ppi = arch_timer_ppi[VIRT_PPI];
+		err = request_percpu_irq(ppi, arch_timer_handler_virt,
+					 "arch_timer", arch_timer_evt);
+	} else {
+		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+		err = request_percpu_irq(ppi, arch_timer_handler_phys,
+					 "arch_timer", arch_timer_evt);
+		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
+			err = request_percpu_irq(ppi, arch_timer_handler_phys,
+						 "arch_timer", arch_timer_evt);
+			if (err)
+				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+						arch_timer_evt);
+		}
+	}
 
-	err = request_percpu_irq(arch_timer_ppi, arch_timer_handler,
-				 "arch_timer", arch_timer_evt);
 	if (err) {
 		pr_err("arch_timer: can't register interrupt %d (%d)\n",
-		       arch_timer_ppi, err);
+		       ppi, err);
 		goto out_free;
 	}
 
-	if (arch_timer_ppi2) {
-		err = request_percpu_irq(arch_timer_ppi2, arch_timer_handler,
-					 "arch_timer", arch_timer_evt);
-		if (err) {
-			pr_err("arch_timer: can't register interrupt %d (%d)\n",
-			       arch_timer_ppi2, err);
-			arch_timer_ppi2 = 0;
-			goto out_free_irq;
-		}
-	}
-
 	err = local_timer_register(&arch_timer_ops);
 	if (err) {
 		/*
@@ -302,21 +438,29 @@ static int __init arch_timer_register(void)
 		arch_timer_global_evt.cpumask = cpumask_of(0);
 		err = arch_timer_setup(&arch_timer_global_evt);
 	}
 	if (err)
 		goto out_free_irq;
 
-	init_current_timer_delay(arch_timer_rate);
+	/* Use the architected timer for the delay loop. */
+	arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
+	arch_delay_timer.freq = arch_timer_rate;
+	register_current_timer_delay(&arch_delay_timer);
 	return 0;
 
 out_free_irq:
-	free_percpu_irq(arch_timer_ppi, arch_timer_evt);
-	if (arch_timer_ppi2)
-		free_percpu_irq(arch_timer_ppi2, arch_timer_evt);
+	if (arch_timer_use_virtual)
+		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
+	else {
+		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+				arch_timer_evt);
+		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
+			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
+					arch_timer_evt);
+	}
 
 out_free:
 	free_percpu(arch_timer_evt);
+out:
 	return err;
 }
@@ -329,6 +473,7 @@ int __init arch_timer_of_register(void)
 {
 	struct device_node *np;
 	u32 freq;
+	int i;
 
 	np = of_find_matching_node(NULL, arch_timer_of_match);
 	if (!np) {
@@ -340,22 +485,40 @@ int __init arch_timer_of_register(void)
 	if (!of_property_read_u32(np, "clock-frequency", &freq))
 		arch_timer_rate = freq;
 
-	arch_timer_ppi = irq_of_parse_and_map(np, 0);
-	arch_timer_ppi2 = irq_of_parse_and_map(np, 1);
-	pr_info("arch_timer: found %s irqs %d %d\n",
-		np->name, arch_timer_ppi, arch_timer_ppi2);
+	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
+		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+
+	/*
+	 * If no interrupt provided for virtual timer, we'll have to
+	 * stick to the physical timer. It'd better be accessible...
+	 */
+	if (!arch_timer_ppi[VIRT_PPI]) {
+		arch_timer_use_virtual = false;
+
+		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
+		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+			pr_warn("arch_timer: No interrupt available, giving up\n");
+			return -EINVAL;
+		}
+	}
 
 	return arch_timer_register();
 }
 
 int __init arch_timer_sched_clock_init(void)
 {
+	u32 (*cnt32)(void);
 	int err;
 
 	err = arch_timer_available();
 	if (err)
 		return err;
 
-	setup_sched_clock(arch_counter_get_cntvct32, 32, arch_timer_rate);
+	if (arch_timer_use_virtual)
+		cnt32 = arch_counter_get_cntvct32;
+	else
+		cnt32 = arch_counter_get_cntpct32;
+
+	setup_sched_clock(cnt32, 32, arch_timer_rate);
 	return 0;
 }
arch/arm/lib/delay.c
@@ -34,7 +34,18 @@ struct arm_delay_ops arm_delay_ops = {
 	.udelay		= __loop_udelay,
 };
 
 #ifdef ARCH_HAS_READ_CURRENT_TIMER
+static const struct delay_timer *delay_timer;
+static bool delay_calibrated;
+
+int read_current_timer(unsigned long *timer_val)
+{
+	if (!delay_timer)
+		return -ENXIO;
+
+	*timer_val = delay_timer->read_current_timer();
+	return 0;
+}
+
 static void __timer_delay(unsigned long cycles)
 {
 	cycles_t start = get_cycles();
@@ -55,18 +66,24 @@ static void __timer_udelay(unsigned long usecs)
 	__timer_const_udelay(usecs * UDELAY_MULT);
 }
 
-void __init init_current_timer_delay(unsigned long freq)
+void __init register_current_timer_delay(const struct delay_timer *timer)
 {
-	pr_info("Switching to timer-based delay loop\n");
-	lpj_fine			= freq / HZ;
-	loops_per_jiffy			= lpj_fine;
-	arm_delay_ops.delay		= __timer_delay;
-	arm_delay_ops.const_udelay	= __timer_const_udelay;
-	arm_delay_ops.udelay		= __timer_udelay;
+	if (!delay_calibrated) {
+		pr_info("Switching to timer-based delay loop\n");
+		delay_timer			= timer;
+		lpj_fine			= timer->freq / HZ;
+		loops_per_jiffy			= lpj_fine;
+		arm_delay_ops.delay		= __timer_delay;
+		arm_delay_ops.const_udelay	= __timer_const_udelay;
+		arm_delay_ops.udelay		= __timer_udelay;
+		delay_calibrated		= true;
+	} else {
+		pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
+	}
 }
 
 unsigned long __cpuinit calibrate_delay_is_known(void)
 {
+	delay_calibrated = true;
 	return lpj_fine;
 }
 #endif
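For comparison, a hedged sketch of how an SoC timer driver might use the new registration hook (all names here are hypothetical; this driver is not part of the commit):

    #include <linux/io.h>
    #include <asm/delay.h>

    /* assumption: a free-running up-counter mapped at my_soc_counter_base */
    static void __iomem *my_soc_counter_base;

    static unsigned long my_soc_read_counter(void)
    {
            return readl(my_soc_counter_base);
    }

    static struct delay_timer my_soc_delay_timer;

    static void __init my_soc_timer_init(void)
    {
            my_soc_delay_timer.read_current_timer = my_soc_read_counter;
            my_soc_delay_timer.freq = 24000000;     /* assumption: 24 MHz counter */
            register_current_timer_delay(&my_soc_delay_timer);
    }

Because delay_calibrated latches after the first registration (or once the generic calibration has run), a duplicate or late call is ignored with a message rather than re-skewing loops_per_jiffy.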