Commit e935b837 authored by Jan Kiszka, committed by Marcelo Tosatti

KVM: Convert kvm_lock to raw_spinlock

Code under this lock requires non-preemptibility. Ensure this also holds on -rt by converting it to a raw spinlock.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent bd3d1ec3
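The diff below swaps every use of the global kvm_lock from the spinlock_t API to the raw_spinlock_t API. On a PREEMPT_RT kernel, spinlock_t becomes a preemptible, mutex-backed sleeping lock, while raw_spinlock_t keeps the classic behaviour of spinning with preemption disabled, which is what the critical sections below rely on. The following is a minimal sketch of the conversion pattern, not KVM code: example_lock, example_list and struct example are hypothetical names, and only the locking calls (DEFINE_RAW_SPINLOCK, raw_spin_lock, raw_spin_unlock) mirror those in the change.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Before: DEFINE_SPINLOCK(example_lock); -- becomes a sleeping lock on -rt. */
DEFINE_RAW_SPINLOCK(example_lock);	/* stays a true spinlock on every config */
static LIST_HEAD(example_list);

struct example {
	struct list_head node;
};

static void example_add(struct example *e)
{
	/* The critical section must not be preempted, even on PREEMPT_RT. */
	raw_spin_lock(&example_lock);
	list_add(&e->node, &example_list);
	raw_spin_unlock(&example_lock);
}

On a non-rt kernel the two lock types behave identically, so the conversion only changes the -rt semantics; the trade-off is that raw-spinlock critical sections should stay short, since they can never be preempted.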
@@ -85,7 +85,7 @@
 #define ASYNC_PF_PER_VCPU 64
-extern spinlock_t kvm_lock;
+extern raw_spinlock_t kvm_lock;
 extern struct list_head vm_list;
 struct kvm_vcpu;
...
@@ -3587,7 +3587,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 	if (nr_to_scan == 0)
 		goto out;
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int idx, freed_pages;
@@ -3610,7 +3610,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 	if (kvm_freed)
 		list_move_tail(&kvm_freed->vm_list, &vm_list);
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 out:
 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
...
@@ -4557,7 +4557,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->cpu != freq->cpu)
@@ -4567,7 +4567,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 				send_ipi = 1;
 		}
 	}
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 	if (freq->old < freq->new && send_ipi) {
 		/*
...
@@ -69,7 +69,7 @@ MODULE_LICENSE("GPL");
  * kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_RAW_SPINLOCK(kvm_lock);
 LIST_HEAD(vm_list);
 static cpumask_var_t cpus_hardware_enabled;
@@ -481,9 +481,9 @@ static struct kvm *kvm_create_vm(void)
 	mutex_init(&kvm->irq_lock);
 	mutex_init(&kvm->slots_lock);
 	atomic_set(&kvm->users_count, 1);
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 	return kvm;
@@ -556,9 +556,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	struct mm_struct *mm = kvm->mm;
 	kvm_arch_sync_events(kvm);
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++)
 		kvm_io_bus_destroy(kvm->buses[i]);
@@ -2177,9 +2177,9 @@ static void hardware_enable_nolock(void *junk)
 static void hardware_enable(void *junk)
 {
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 	hardware_enable_nolock(junk);
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 }
 static void hardware_disable_nolock(void *junk)
@@ -2194,9 +2194,9 @@ static void hardware_disable_nolock(void *junk)
 static void hardware_disable(void *junk)
 {
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 	hardware_disable_nolock(junk);
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 }
 static void hardware_disable_all_nolock(void)
@@ -2210,16 +2210,16 @@ static void hardware_disable_all_nolock(void)
 static void hardware_disable_all(void)
 {
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 	hardware_disable_all_nolock();
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 }
 static int hardware_enable_all(void)
 {
 	int r = 0;
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 	kvm_usage_count++;
 	if (kvm_usage_count == 1) {
@@ -2232,7 +2232,7 @@ static int hardware_enable_all(void)
 		}
 	}
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 	return r;
 }
@@ -2394,10 +2394,10 @@ static int vm_stat_get(void *_offset, u64 *val)
 	struct kvm *kvm;
 	*val = 0;
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		*val += *(u32 *)((void *)kvm + offset);
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 	return 0;
 }
@@ -2411,12 +2411,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 	int i;
 	*val = 0;
-	spin_lock(&kvm_lock);
+	raw_spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			*val += *(u32 *)((void *)vcpu + offset);
-	spin_unlock(&kvm_lock);
+	raw_spin_unlock(&kvm_lock);
 	return 0;
 }
@@ -2457,7 +2457,7 @@ static int kvm_suspend(struct sys_device *dev, pm_message_t state)
 static int kvm_resume(struct sys_device *dev)
 {
 	if (kvm_usage_count) {
-		WARN_ON(spin_is_locked(&kvm_lock));
+		WARN_ON(raw_spin_is_locked(&kvm_lock));
 		hardware_enable_nolock(NULL);
 	}
 	return 0;
...