diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 6dd802c6d7806c68741b73534029b85a6e1c5cb7..cd1b362e4a237284a171900d7ab7389c428e335f 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -673,7 +673,7 @@ static cpumask_t waiting_cpus;
 /* Track spinlock on which a cpu is waiting */
 static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
 
-static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
 	struct kvm_lock_waiting *w;
 	int cpu;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 0e36cde12f7e7de605d676055897f04bb231d654..581521c843a576d4264567e90c11dfaf645d6238 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -106,7 +106,7 @@ static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 
 static bool xen_pvspin = true;
-static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
 	int irq = __this_cpu_read(lock_kicker_irq);
 	struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);
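
Note (not part of the patch): both lock_spinning callbacks are reached through
PV_CALLEE_SAVE_REGS_THUNK-generated assembly rather than a visible C call, so
dropping "static" in favor of __visible keeps LTO from pruning or internalizing
them. Below is a minimal standalone sketch of that pattern, assuming __visible
expands to GCC's externally_visible attribute as the kernel's compiler headers
do; the demo_* names are hypothetical and only illustrate the idea.

/* sketch: a function whose only caller lives in asm the optimizer cannot see */
#define __visible __attribute__((externally_visible))

__visible void demo_lock_spinning(void)
{
	/* lock-waiting logic elided in this sketch */
}

/* stand-in for a PV_CALLEE_SAVE_REGS_THUNK-style wrapper: the reference to
 * demo_lock_spinning exists only inside this top-level asm block, so without
 * the externally_visible marking, -flto could discard the function */
asm(".pushsection .text\n"
    ".globl demo_lock_spinning_thunk\n"
    "demo_lock_spinning_thunk:\n"
    "	call demo_lock_spinning\n"
    "	ret\n"
    ".popsection\n");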