openeuler / raspberrypi-kernel
Commit eac4345b
Authored Jul 31, 2008 by Ingo Molnar
Merge branch 'x86/spinlocks' into x86/xen
Parents: 5fbf2465, d5de8841

Showing 7 changed files with 226 additions and 193 deletions (+226 −193).
arch/x86/kernel/Makefile              +2    -2
arch/x86/kernel/paravirt-spinlocks.c  +31   -0
arch/x86/kernel/paravirt.c            +0    -23
arch/x86/xen/Makefile                 +7    -1
arch/x86/xen/smp.c                    +0    -167
arch/x86/xen/spinlock.c               +183  -0
arch/x86/xen/xen-ops.h                +3    -0
arch/x86/kernel/Makefile

@@ -10,7 +10,7 @@ ifdef CONFIG_FTRACE
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
-CFLAGS_REMOVE_paravirt.o = -pg
+CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 endif
 
 #
@@ -89,7 +89,7 @@ obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
 obj-$(CONFIG_VMI)		+= vmi_32.o vmiclock_32.o
 obj-$(CONFIG_KVM_GUEST)		+= kvm.o
 obj-$(CONFIG_KVM_CLOCK)		+= kvmclock.o
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.o
 obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
arch/x86/kernel/paravirt-spinlocks.c (new file, mode 100644)

/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/paravirt.h>

struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.spin_is_locked = __ticket_spin_is_locked,
	.spin_is_contended = __ticket_spin_is_contended,

	.spin_lock = __ticket_spin_lock,
	.spin_trylock = __ticket_spin_trylock,
	.spin_unlock = __ticket_spin_unlock,
#endif
};
EXPORT_SYMBOL_GPL(pv_lock_ops);

void __init paravirt_use_bytelocks(void)
{
#ifdef CONFIG_SMP
	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
	pv_lock_ops.spin_lock = __byte_spin_lock;
	pv_lock_ops.spin_trylock = __byte_spin_trylock;
	pv_lock_ops.spin_unlock = __byte_spin_unlock;
#endif
}
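For orientation (not part of this commit): the point of the pv_lock_ops table is that the generic spinlock entry points dispatch through it, so a guest kernel can swap in hypervisor-aware implementations at boot. A minimal sketch of that indirection, with plain function-pointer calls standing in for the kernel's actual PVOP_* calling-convention macros in asm/paravirt.h:

/* Sketch only: simplified stand-ins for the asm/paravirt.h wrappers.
 * The real kernel dispatches through PVOP_* macros so the indirect
 * calls can be patched at runtime; plain calls are shown for clarity. */
static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
	return pv_lock_ops.spin_is_locked(lock);
}

static inline void __raw_spin_lock(struct raw_spinlock *lock)
{
	pv_lock_ops.spin_lock(lock);
}

static inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
	return pv_lock_ops.spin_trylock(lock);
}

static inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
	pv_lock_ops.spin_unlock(lock);
}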
arch/x86/kernel/paravirt.c

@@ -268,17 +268,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	return __get_cpu_var(paravirt_lazy_mode);
 }
 
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-	pv_lock_ops.spin_lock = __byte_spin_lock;
-	pv_lock_ops.spin_trylock = __byte_spin_trylock;
-	pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -465,18 +454,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.set_fixmap = native_set_fixmap,
 };
 
-struct pv_lock_ops pv_lock_ops = {
-#ifdef CONFIG_SMP
-	.spin_is_locked = __ticket_spin_is_locked,
-	.spin_is_contended = __ticket_spin_is_contended,
-
-	.spin_lock = __ticket_spin_lock,
-	.spin_trylock = __ticket_spin_trylock,
-	.spin_unlock = __ticket_spin_unlock,
-#endif
-};
-EXPORT_SYMBOL_GPL(pv_lock_ops);
-
 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL(pv_cpu_ops);
 EXPORT_SYMBOL(pv_mmu_ops);
arch/x86/xen/Makefile

+ifdef CONFIG_FTRACE
+# Do not profile debug and lowlevel utilities
+CFLAGS_REMOVE_spinlock.o = -pg
+CFLAGS_REMOVE_time.o = -pg
+endif
+
 obj-y		:= enlighten.o setup.o multicalls.o mmu.o \
 			time.o xen-asm_$(BITS).o grant-table.o suspend.o
 
-obj-$(CONFIG_SMP)	+= smp.o
+obj-$(CONFIG_SMP)	+= smp.o spinlock.o
arch/x86/xen/smp.c

@@ -15,7 +15,6 @@
  * This does not handle HOTPLUG_CPU yet.
  */
 #include <linux/sched.h>
-#include <linux/kernel_stat.h>
 #include <linux/err.h>
 #include <linux/smp.h>
@@ -36,8 +35,6 @@
 #include "xen-ops.h"
 #include "mmu.h"
 
-static void __cpuinit xen_init_lock_cpu(int cpu);
-
 cpumask_t xen_cpu_initialized_map;
 
 static DEFINE_PER_CPU(int, resched_irq);
@@ -419,170 +416,6 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-struct xen_spinlock {
-	unsigned char lock;		/* 0 -> free; 1 -> locked */
-	unsigned short spinners;	/* count of waiting cpus */
-};
-
-static int xen_spin_is_locked(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-	return xl->lock != 0;
-}
-
-static int xen_spin_is_contended(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-	/* Not strictly true; this is only the count of contended
-	   lock-takers entering the slow path. */
-	return xl->spinners != 0;
-}
-
-static int xen_spin_trylock(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	u8 old = 1;
-
-	asm("xchgb %b0,%1"
-	    : "+q" (old), "+m" (xl->lock) : : "memory");
-
-	return old == 0;
-}
-
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
-
-static inline void spinning_lock(struct xen_spinlock *xl)
-{
-	__get_cpu_var(lock_spinners) = xl;
-	wmb();			/* set lock of interest before count */
-	asm(LOCK_PREFIX " incw %0"
-	    : "+m" (xl->spinners) : : "memory");
-}
-
-static inline void unspinning_lock(struct xen_spinlock *xl)
-{
-	asm(LOCK_PREFIX " decw %0"
-	    : "+m" (xl->spinners) : : "memory");
-	wmb();			/* decrement count before clearing lock */
-	__get_cpu_var(lock_spinners) = NULL;
-}
-
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	int irq = __get_cpu_var(lock_kicker_irq);
-	int ret;
-
-	/* If kicker interrupts not initialized yet, just spin */
-	if (irq == -1)
-		return 0;
-
-	/* announce we're spinning */
-	spinning_lock(xl);
-
-	/* clear pending */
-	xen_clear_irq_pending(irq);
-
-	/* check again make sure it didn't become free while
-	   we weren't looking */
-	ret = xen_spin_trylock(lock);
-	if (ret)
-		goto out;
-
-	/* block until irq becomes pending */
-	xen_poll_irq(irq);
-	kstat_this_cpu.irqs[irq]++;
-
-out:
-	unspinning_lock(xl);
-	return ret;
-}
-
-static void xen_spin_lock(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	int timeout;
-	u8 oldval;
-
-	do {
-		timeout = 1 << 10;
-
-		asm("1: xchgb %1,%0\n"
-		    "   testb %1,%1\n"
-		    "   jz 3f\n"
-		    "2: rep;nop\n"
-		    "   cmpb $0,%0\n"
-		    "   je 1b\n"
-		    "   dec %2\n"
-		    "   jnz 2b\n"
-		    "3:\n"
-		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
-		    : "1" (1)
-		    : "memory");
-
-	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
-}
-
-static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		/* XXX should mix up next cpu selection */
-		if (per_cpu(lock_spinners, cpu) == xl) {
-			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
-			break;
-		}
-	}
-}
-
-static void xen_spin_unlock(struct raw_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-	smp_wmb();		/* make sure no writes get moved after unlock */
-	xl->lock = 0;		/* release lock */
-
-	/* make sure unlock happens before kick */
-	barrier();
-
-	if (unlikely(xl->spinners))
-		xen_spin_unlock_slow(xl);
-}
-
-static __cpuinit void xen_init_lock_cpu(int cpu)
-{
-	int irq;
-	const char *name;
-
-	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
-	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
-				     cpu,
-				     xen_reschedule_interrupt,
-				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
-				     name,
-				     NULL);
-
-	if (irq >= 0) {
-		disable_irq(irq); /* make sure it's never delivered */
-		per_cpu(lock_kicker_irq, cpu) = irq;
-	}
-
-	printk("cpu %d spinlock event irq %d\n", cpu, irq);
-}
-
-static void __init xen_init_spinlocks(void)
-{
-	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
-	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
-	pv_lock_ops.spin_lock = xen_spin_lock;
-	pv_lock_ops.spin_trylock = xen_spin_trylock;
-	pv_lock_ops.spin_unlock = xen_spin_unlock;
-}
-
 static const struct smp_ops xen_smp_ops __initdata = {
 	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
 	.smp_prepare_cpus = xen_smp_prepare_cpus,
arch/x86/xen/spinlock.c (new file, mode 100644)

/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"

struct xen_spinlock {
	unsigned char lock;		/* 0 -> free; 1 -> locked */
	unsigned short spinners;	/* count of waiting cpus */
};

static int xen_spin_is_locked(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	return xl->lock != 0;
}

static int xen_spin_is_contended(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	/* Not strictly true; this is only the count of contended
	   lock-takers entering the slow path. */
	return xl->spinners != 0;
}

static int xen_spin_trylock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %b0,%1"
	    : "+q" (old), "+m" (xl->lock) : : "memory");

	return old == 0;
}

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);

static inline void spinning_lock(struct xen_spinlock *xl)
{
	__get_cpu_var(lock_spinners) = xl;
	wmb();			/* set lock of interest before count */
	asm(LOCK_PREFIX " incw %0"
	    : "+m" (xl->spinners) : : "memory");
}

static inline void unspinning_lock(struct xen_spinlock *xl)
{
	asm(LOCK_PREFIX " decw %0"
	    : "+m" (xl->spinners) : : "memory");
	wmb();			/* decrement count before clearing lock */
	__get_cpu_var(lock_spinners) = NULL;
}

static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	int irq = __get_cpu_var(lock_kicker_irq);
	int ret;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return 0;

	/* announce we're spinning */
	spinning_lock(xl);

	/* clear pending */
	xen_clear_irq_pending(irq);

	/* check again make sure it didn't become free while
	   we weren't looking */
	ret = xen_spin_trylock(lock);
	if (ret)
		goto out;

	/* block until irq becomes pending */
	xen_poll_irq(irq);
	kstat_this_cpu.irqs[irq]++;

out:
	unspinning_lock(xl);
	return ret;
}

static void xen_spin_lock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
	int timeout;
	u8 oldval;

	do {
		timeout = 1 << 10;

		asm("1: xchgb %1,%0\n"
		    "   testb %1,%1\n"
		    "   jz 3f\n"
		    "2: rep;nop\n"
		    "   cmpb $0,%0\n"
		    "   je 1b\n"
		    "   dec %2\n"
		    "   jnz 2b\n"
		    "3:\n"
		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
		    : "1" (1)
		    : "memory");

	} while (unlikely(oldval != 0 && !xen_spin_lock_slow(lock)));
}

static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
{
	int cpu;

	for_each_online_cpu(cpu) {
		/* XXX should mix up next cpu selection */
		if (per_cpu(lock_spinners, cpu) == xl) {
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

static void xen_spin_unlock(struct raw_spinlock *lock)
{
	struct xen_spinlock *xl = (struct xen_spinlock *)lock;

	smp_wmb();		/* make sure no writes get moved after unlock */
	xl->lock = 0;		/* release lock */

	/* make sure unlock happens before kick */
	barrier();

	if (unlikely(xl->spinners))
		xen_spin_unlock_slow(xl);
}

static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

void __cpuinit xen_init_lock_cpu(int cpu)
{
	int irq;
	const char *name;

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void __init xen_init_spinlocks(void)
{
	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
	pv_lock_ops.spin_lock = xen_spin_lock;
	pv_lock_ops.spin_trylock = xen_spin_trylock;
	pv_lock_ops.spin_unlock = xen_spin_unlock;
}
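To make the fast-path/slow-path split above easier to see outside the kernel, here is a small user-space model of the same protocol, a sketch assuming C11 atomics in place of the xchgb/lock incw inline asm; the model_* names are invented for illustration and the spin-wait stands in for blocking on the Xen event channel:

/*
 * User-space model (sketch only, not kernel code) of the byte-lock
 * protocol above.  The slow path here merely spins; the kernel
 * instead blocks in xen_poll_irq() and is woken by an IPI.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct model_spinlock {
	atomic_uchar lock;	/* 0 -> free; 1 -> locked */
	atomic_ushort spinners;	/* count of waiters in the slow path */
};

static bool model_trylock(struct model_spinlock *xl)
{
	/* mirrors "xchgb %b0,%1": swap in 1; we own the lock if it was 0 */
	return atomic_exchange(&xl->lock, 1) == 0;
}

static void model_lock(struct model_spinlock *xl)
{
	while (!model_trylock(xl)) {
		/* slow path: advertise ourselves, wait, then retry */
		atomic_fetch_add(&xl->spinners, 1);
		while (atomic_load(&xl->lock) != 0)
			;	/* kernel: xen_poll_irq() blocks here */
		atomic_fetch_sub(&xl->spinners, 1);
	}
}

static void model_unlock(struct model_spinlock *xl)
{
	atomic_store(&xl->lock, 0);	/* release the lock */
	if (atomic_load(&xl->spinners) != 0) {
		/* kernel: kick one spinner via XEN_SPIN_UNLOCK_VECTOR */
	}
}

int main(void)
{
	struct model_spinlock l = { 0, 0 };

	model_lock(&l);
	printf("locked: %d\n", (int)atomic_load(&l.lock));
	model_unlock(&l);
	printf("unlocked: %d\n", (int)atomic_load(&l.lock));
	return 0;
}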
arch/x86/xen/xen-ops.h

@@ -50,6 +50,9 @@ void __init xen_setup_vcpu_info_placement(void);
 #ifdef CONFIG_SMP
 void xen_smp_init(void);
 
+void __init xen_init_spinlocks(void);
+__cpuinit void xen_init_lock_cpu(int cpu);
+
 extern cpumask_t xen_cpu_initialized_map;
 #else
 static inline void xen_smp_init(void) {}
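For illustration only: the call sites for these two entry points are collapsed out of the diff above, but the declarations suggest xen_init_spinlocks() runs once during SMP setup while xen_init_lock_cpu() runs as each CPU is brought up. A hypothetical sketch of that wiring, with example_* names invented here (the real callers live in arch/x86/xen/smp.c):

/* Hypothetical call sites, not part of this diff. */
void __init example_xen_smp_init(void)
{
	/* install the Xen implementations into pv_lock_ops, once */
	xen_init_spinlocks();
}

void __cpuinit example_cpu_bringup(int cpu)
{
	/* bind this CPU's "kicker" IPI before it can enter slow paths */
	xen_init_lock_cpu(cpu);
}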