Commit 4bb689ee authored by Ingo Molnar

x86: paravirt spinlocks, !CONFIG_SMP build fixes

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 2d9e1e2f
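What broke: with CONFIG_SMP disabled, spinlocks compile away, so the __ticket_spin_* and __byte_spin_* helpers referenced below are never built, and the unconditional references to them in pv_lock_ops fail the !CONFIG_SMP build. The fix guards every such reference with #ifdef CONFIG_SMP. Below is a minimal standalone sketch (not kernel code) of the two patterns involved, a guarded designated-initializer default plus an init-time switch in the spirit of paravirt_use_bytelocks(); all names here (my_lock_ops, ticket_lock, byte_lock, use_bytelocks) are hypothetical, and it builds with or without -DCONFIG_SMP:

#include <stdio.h>

/* Hypothetical stand-in for the kernel's pv_lock_ops. */
struct my_lock_ops {
	void (*spin_lock)(void);
};

#ifdef CONFIG_SMP
/* Stand-ins for __ticket_spin_lock / __byte_spin_lock, which the
 * kernel only compiles on SMP configurations. */
static void ticket_lock(void) { puts("ticket lock"); }
static void byte_lock(void)   { puts("byte lock");   }
#endif

/* Reference the helpers only when they actually exist. On a UP build
 * the initializer list is empty (a GCC extension the kernel relies on)
 * and the struct stays zero-filled. */
static struct my_lock_ops my_lock_ops = {
#ifdef CONFIG_SMP
	.spin_lock = ticket_lock,
#endif
};

/* Analogue of paravirt_use_bytelocks(): repoint the ops once at boot. */
static void use_bytelocks(void)
{
#ifdef CONFIG_SMP
	my_lock_ops.spin_lock = byte_lock;
#endif
}

int main(void)
{
#ifdef CONFIG_SMP
	my_lock_ops.spin_lock();	/* ticket lock */
	use_bytelocks();
	my_lock_ops.spin_lock();	/* now byte lock */
#else
	use_bytelocks();		/* harmless no-op on UP */
	puts("UP build: no lock ops referenced");
#endif
	return 0;
}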
@@ -270,11 +270,13 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 void __init paravirt_use_bytelocks(void)
 {
+#ifdef CONFIG_SMP
 	pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
 	pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
 	pv_lock_ops.spin_lock = __byte_spin_lock;
 	pv_lock_ops.spin_trylock = __byte_spin_trylock;
 	pv_lock_ops.spin_unlock = __byte_spin_unlock;
+#endif
 }
 
 struct pv_info pv_info = {
@@ -461,12 +463,14 @@ struct pv_mmu_ops pv_mmu_ops = {
 };
 
 struct pv_lock_ops pv_lock_ops = {
+#ifdef CONFIG_SMP
 	.spin_is_locked = __ticket_spin_is_locked,
 	.spin_is_contended = __ticket_spin_is_contended,
 	.spin_lock = __ticket_spin_lock,
 	.spin_trylock = __ticket_spin_trylock,
 	.spin_unlock = __ticket_spin_unlock,
+#endif
 };
 
 EXPORT_SYMBOL_GPL(pv_time_ops);
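Note the symmetry with the header change below: on a !CONFIG_SMP build the initializer above becomes empty and pv_lock_ops stays zero-filled, which is safe because the matching #ifdef in the header compiles out the __raw_spin_* wrappers that would otherwise call through those null pointers.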
@@ -1387,6 +1387,8 @@ void _paravirt_nop(void);
 void paravirt_use_bytelocks(void);
 
+#ifdef CONFIG_SMP
+
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
@@ -1412,6 +1414,8 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 	return PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
 
+#endif
+
 /* These all sit in the .parainstructions section to tell us what to patch. */
 struct paravirt_patch_site {
 	u8 *instr;	/* original instructions */
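The PVOP_CALL1/PVOP_VCALL1 macros used in the wrappers above expand to patchable indirect calls through pv_lock_ops. Stripped of the patching machinery, the dispatch reduces to a call through a function pointer. A plain-C approximation of that reduction, with hypothetical names (my_raw_spinlock, my_pv_lock_ops, ticket_*) and the kernel's real lock layout and binary patching omitted:

#include <stdio.h>

/* Hypothetical stand-ins; the kernel's raw_spinlock and pv_lock_ops are
 * more involved, and PVOP_CALL1 additionally records each call site in
 * .parainstructions for later binary patching. */
struct my_raw_spinlock { int slock; };

struct my_pv_lock_ops {
	int  (*spin_is_locked)(struct my_raw_spinlock *lock);
	void (*spin_unlock)(struct my_raw_spinlock *lock);
};

static int  ticket_is_locked(struct my_raw_spinlock *l) { return l->slock != 0; }
static void ticket_unlock(struct my_raw_spinlock *l)    { l->slock = 0; }

static struct my_pv_lock_ops ops = {
	.spin_is_locked = ticket_is_locked,
	.spin_unlock    = ticket_unlock,
};

/* What the inline wrappers reduce to: forward to whichever backend the
 * ops struct currently points at. */
static inline int my_raw_spin_is_locked(struct my_raw_spinlock *lock)
{
	return ops.spin_is_locked(lock);	/* ~ PVOP_CALL1(int, ...) */
}

static inline void my_raw_spin_unlock(struct my_raw_spinlock *lock)
{
	ops.spin_unlock(lock);			/* ~ PVOP_VCALL1(...) */
}

int main(void)
{
	struct my_raw_spinlock l = { .slock = 1 };
	printf("locked=%d\n", my_raw_spin_is_locked(&l));
	my_raw_spin_unlock(&l);
	printf("locked=%d\n", my_raw_spin_is_locked(&l));
	return 0;
}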