提交 6c62985d 编写于 作者: Waiman Long 提交者: Paolo Bonzini

x86/paravirt: Change vcpu_is_preempted() arg type to long

The cpu argument in the function prototype of vcpu_is_preempted()
is changed from int to long. That makes it easier to provide a better
optimized assembly version of that function.

For Xen, vcpu_is_preempted(long) calls xen_vcpu_stolen(int), the
downcast from long to int is not a problem as vCPU number won't exceed
32 bits.
Signed-off-by: Waiman Long <longman@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
上级 96794e4e
...@@ -673,7 +673,7 @@ static __always_inline void pv_kick(int cpu) ...@@ -673,7 +673,7 @@ static __always_inline void pv_kick(int cpu)
PVOP_VCALL1(pv_lock_ops.kick, cpu); PVOP_VCALL1(pv_lock_ops.kick, cpu);
} }
/*
 * Ask the paravirt backend whether the given vCPU is currently
 * preempted by the hypervisor.
 *
 * The cpu argument is long rather than int: per the commit message,
 * the wider type makes it easier to provide an optimized assembly
 * implementation of the callee-save thunk behind
 * pv_lock_ops.vcpu_is_preempted.
 */
static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}
......
...@@ -34,7 +34,7 @@ static inline void queued_spin_unlock(struct qspinlock *lock) ...@@ -34,7 +34,7 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
} }
/* Tell generic code an arch-specific vcpu_is_preempted() exists. */
#define vcpu_is_preempted vcpu_is_preempted
/*
 * Report whether @cpu's vCPU has been preempted by the hypervisor.
 * Takes a long so the argument type matches the paravirt callee,
 * which allows a better-optimized assembly implementation.
 */
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
......
...@@ -589,7 +589,7 @@ static void kvm_wait(u8 *ptr, u8 val) ...@@ -589,7 +589,7 @@ static void kvm_wait(u8 *ptr, u8 val)
local_irq_restore(flags); local_irq_restore(flags);
} }
__visible bool __kvm_vcpu_is_preempted(int cpu) __visible bool __kvm_vcpu_is_preempted(long cpu)
{ {
struct kvm_steal_time *src = &per_cpu(steal_time, cpu); struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
......
...@@ -20,7 +20,7 @@ bool pv_is_native_spin_unlock(void) ...@@ -20,7 +20,7 @@ bool pv_is_native_spin_unlock(void)
__raw_callee_save___native_queued_spin_unlock; __raw_callee_save___native_queued_spin_unlock;
} }
/*
 * Native (non-paravirtualized) implementation: with no hypervisor,
 * a CPU can never be preempted from under us, so always report false.
 * The long argument type matches the paravirt prototype.
 */
__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册