Commit 5a868a49 authored by Wei Li, committed by Zheng Zengkai

KVM: arm64: Rename 'struct pv_sched_ops'

hulk inclusion
category: feature
bugzilla: 169576
CVE: NA

-------------------------------------------------

Following x86, rename 'struct pv_sched_ops sched' to
'struct pv_lock_ops lock' to prepare for supporting CNA on arm64.
Signed-off-by: Wei Li <liwei391@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent 72492082
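For quick reference, the net effect of the rename is condensed below. This simply restates the first two hunks of the diff; no code beyond what the patch itself touches is introduced.

/* Before this patch */
struct paravirt_patch_template {
        struct pv_time_ops time;
        struct pv_sched_ops sched;
};

/* After this patch: struct and field names follow the x86 paravirt naming */
struct paravirt_patch_template {
        struct pv_time_ops time;
        struct pv_lock_ops lock;
};

Callers are unaffected because they reach the table through the pv_*() static inline wrappers; only the member accesses inside those wrappers change, as the hunks below show.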
@@ -11,7 +11,7 @@ struct pv_time_ops {
         unsigned long long (*steal_clock)(int cpu);
 };

-struct pv_sched_ops {
+struct pv_lock_ops {
         void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
         void (*queued_spin_unlock)(struct qspinlock *lock);
@@ -23,7 +23,7 @@ struct pv_sched_ops {
 struct paravirt_patch_template {
         struct pv_time_ops time;
-        struct pv_sched_ops sched;
+        struct pv_lock_ops lock;
 };

 extern struct paravirt_patch_template pv_ops;
@@ -40,7 +40,7 @@ int __init pv_sched_init(void);
 __visible bool __native_vcpu_is_preempted(int cpu);

 static inline bool pv_vcpu_is_preempted(int cpu)
 {
-        return pv_ops.sched.vcpu_is_preempted(cpu);
+        return pv_ops.lock.vcpu_is_preempted(cpu);
 }

 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
@@ -48,22 +48,22 @@ void __init pv_qspinlock_init(void);
 bool pv_is_native_spin_unlock(void);

 static inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
-        return pv_ops.sched.queued_spin_lock_slowpath(lock, val);
+        return pv_ops.lock.queued_spin_lock_slowpath(lock, val);
 }

 static inline void pv_queued_spin_unlock(struct qspinlock *lock)
 {
-        return pv_ops.sched.queued_spin_unlock(lock);
+        return pv_ops.lock.queued_spin_unlock(lock);
 }

 static inline void pv_wait(u8 *ptr, u8 val)
 {
-        return pv_ops.sched.wait(ptr, val);
+        return pv_ops.lock.wait(ptr, val);
 }

 static inline void pv_kick(int cpu)
 {
-        return pv_ops.sched.kick(cpu);
+        return pv_ops.lock.kick(cpu);
 }

 #else
@@ -33,10 +33,10 @@ struct static_key paravirt_steal_rq_enabled;
 struct paravirt_patch_template pv_ops = {
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
-        .sched.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
-        .sched.queued_spin_unlock = native_queued_spin_unlock,
+        .lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+        .lock.queued_spin_unlock = native_queued_spin_unlock,
 #endif
-        .sched.vcpu_is_preempted = __native_vcpu_is_preempted,
+        .lock.vcpu_is_preempted = __native_vcpu_is_preempted,
 };
 EXPORT_SYMBOL_GPL(pv_ops);
@@ -301,10 +301,10 @@ void __init pv_qspinlock_init(void)
         pr_info("PV qspinlocks enabled\n");

         __pv_init_lock_hash();
-        pv_ops.sched.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-        pv_ops.sched.queued_spin_unlock = __pv_queued_spin_unlock;
-        pv_ops.sched.wait = kvm_wait;
-        pv_ops.sched.kick = kvm_kick_cpu;
+        pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+        pv_ops.lock.queued_spin_unlock = __pv_queued_spin_unlock;
+        pv_ops.lock.wait = kvm_wait;
+        pv_ops.lock.kick = kvm_kick_cpu;
 }

 static __init int arm_parse_pvspin(char *arg)
@@ -331,7 +331,7 @@ int __init pv_sched_init(void)
         if (ret)
                 return ret;

-        pv_ops.sched.vcpu_is_preempted = kvm_vcpu_is_preempted;
+        pv_ops.lock.vcpu_is_preempted = kvm_vcpu_is_preempted;
         pr_info("using PV sched preempted\n");

         pv_qspinlock_init();
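For readers unfamiliar with the arm64 paravirt spinlock plumbing, the sketch below shows roughly how the renamed pv_ops.lock table ends up being exercised. The pv_queued_spin_lock_slowpath() wrapper and the table defaults come from the hunks above; queued_spin_lock() itself is a simplified, hypothetical stand-in for the generic qspinlock fast path (using atomic_try_cmpxchg_acquire() and _Q_LOCKED_VAL from the generic qspinlock headers), not code from this patch.

/*
 * Sketch only: simplified illustration of how pv_ops.lock is consumed.
 * queued_spin_lock() here is a condensed stand-in for the generic
 * qspinlock fast path; only the pv_* wrapper it calls is from this series.
 */
static inline void queued_spin_lock(struct qspinlock *lock)
{
        int val = 0;

        /* Uncontended fast path: take the lock without any paravirt dispatch. */
        if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
                return;

        /*
         * Contended: go through pv_ops.lock.queued_spin_lock_slowpath, which
         * defaults to native_queued_spin_lock_slowpath() and is switched to
         * __pv_queued_spin_lock_slowpath() by pv_qspinlock_init() when
         * running as a KVM guest.
         */
        pv_queued_spin_lock_slowpath(lock, val);
}

The point of this indirection is that an alternative slow path (such as the future CNA implementation the commit message mentions) can presumably be installed in pv_ops.lock without changing any caller.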