paravirt-spinlocks.c
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in an FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

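/*
 * Wrapper around the native unlock so that PV_CALLEE_SAVE_REGS_THUNK below
 * can generate __raw_callee_save___native_queued_spin_unlock, a thunk that
 * preserves all caller-clobbered registers.  Paravirt call sites can then
 * treat the op as callee-save and avoid spilling registers around the call.
 */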
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

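/*
 * True if the queued_spin_unlock op still points at the native thunk above;
 * the paravirt patching code uses this to decide whether the indirect call
 * site can be patched with the native unlock sequence directly.
 */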
bool pv_is_native_spin_unlock(void)
{
	return pv_lock_ops.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}

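/*
 * On bare metal there is no hypervisor to preempt a vCPU, so the native
 * implementation unconditionally reports "not preempted".
 */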
__visible bool __native_vcpu_is_preempted(int cpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

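/*
 * True if vcpu_is_preempted still points at the native thunk above, again
 * so the patching code can recognize and inline the native case.
 */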
bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_lock_ops.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
}

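/*
 * Default to the bare-metal implementations.  When running as a guest,
 * hypervisor-specific code (e.g. Xen or KVM) overwrites these ops at boot.
 */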
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
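	/*
	 * Native locks just spin; the wait/kick hooks are only exercised
	 * by a paravirtualized qspinlock slowpath, so they default to nops.
	 */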
	.wait = paravirt_nop,
	.kick = paravirt_nop,
	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
};
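/* Exported so spinlock ops inlined into modules can reach pv_lock_ops. */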
EXPORT_SYMBOL(pv_lock_ops);