Commit 58ddfe6c authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:

 - ARM/ARM64 locking fixes

 - x86 fixes: PCID, UMIP, locking

 - improved support for recent Windows versions that have a 2048 Hz APIC
   timer (a 2048 Hz timer has a period of roughly 488 us, below the old
   500 us floor, so the default limit is lowered to 200 us)

 - rename KVM_HINTS_DEDICATED CPUID bit to KVM_HINTS_REALTIME (a guest-side
   probe of the renamed bit is sketched below this list)

 - better behaved selftests
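
As an aside on the renamed hint, here is an illustrative sketch (not part of this merge) of how a guest can probe the bit directly. It assumes the usual KVM CPUID layout: the "KVMKVMKVM\0\0\0" signature at leaf 0x40000000 and the hint bits reported in EDX of leaf 0x40000001.

/*
 * Illustrative sketch only -- not from this merge.  Probes KVM_HINTS_REALTIME
 * (formerly KVM_HINTS_DEDICATED) from inside a guest.  Assumes the hypervisor
 * signature sits at CPUID leaf 0x40000000 and hint bits in EDX of 0x40000001.
 */
#include <stdbool.h>
#include <stdio.h>
#include <cpuid.h>                      /* __cpuid() from GCC/Clang */

#define KVM_CPUID_SIGNATURE 0x40000000u
#define KVM_CPUID_FEATURES  0x40000001u
#define KVM_HINTS_REALTIME  0           /* bit 0 */

static bool guest_has_realtime_hint(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* "KVMKVMKVM\0\0\0" split across EBX/ECX/EDX. */
        __cpuid(KVM_CPUID_SIGNATURE, eax, ebx, ecx, edx);
        if (ebx != 0x4b4d564b || ecx != 0x564b4d56 || edx != 0x4d)
                return false;           /* not running on KVM */

        __cpuid(KVM_CPUID_FEATURES, eax, ebx, ecx, edx);
        return edx & (1u << KVM_HINTS_REALTIME);
}

int main(void)
{
        printf("KVM_HINTS_REALTIME is %s\n",
               guest_has_realtime_hint() ? "set" : "not set");
        return 0;
}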

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: rename KVM_HINTS_DEDICATED to KVM_HINTS_REALTIME
  KVM: arm/arm64: VGIC/ITS save/restore: protect kvm_read_guest() calls
  KVM: arm/arm64: VGIC/ITS: protect kvm_read_guest() calls with SRCU lock
  KVM: arm/arm64: VGIC/ITS: Promote irq_lock() in update_affinity
  KVM: arm/arm64: Properly protect VGIC locks from IRQs
  KVM: X86: Lower the default timer frequency limit to 200us
  KVM: vmx: update sec exec controls for UMIP iff emulating UMIP
  kvm: x86: Suppress CR3_PCID_INVD bit only when PCIDs are enabled
  KVM: selftests: exit with 0 status code when tests cannot be run
  KVM: hyperv: idr_find needs RCU protection
  x86: Delay skip of emulated hypercall instruction
  KVM: Extend MAX_IRQ_ROUTES to 4096 for all archs
Parents: 7c9a0fc7 633711e8
...@@ -72,8 +72,8 @@ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
 flag                               || value || meaning
 ==================================================================================
-KVM_HINTS_DEDICATED                ||     0 || guest checks this feature bit to
-                                   ||       || determine if there is vCPU pinning
-                                   ||       || and there is no vCPU over-commitment,
+KVM_HINTS_REALTIME                 ||     0 || guest checks this feature bit to
+                                   ||       || determine that vCPUs are never
+                                   ||       || preempted for an unlimited time,
                                    ||       || allowing optimizations
 ----------------------------------------------------------------------------------
...@@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
         return 8;
 }

+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+                                      gpa_t gpa, void *data, unsigned long len)
+{
+        int srcu_idx = srcu_read_lock(&kvm->srcu);
+        int ret = kvm_read_guest(kvm, gpa, data, len);
+
+        srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+        return ret;
+}
+
 static inline void *kvm_get_hyp_vector(void)
 {
         return kvm_ksym_ref(__kvm_hyp_vector);
......
...@@ -360,6 +360,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
         return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }

+/*
+ * We are not in the kvm->srcu critical section most of the time, so we take
+ * the SRCU read lock here. Since we copy the data from the user page, we
+ * can immediately drop the lock again.
+ */
+static inline int kvm_read_guest_lock(struct kvm *kvm,
+                                      gpa_t gpa, void *data, unsigned long len)
+{
+        int srcu_idx = srcu_read_lock(&kvm->srcu);
+        int ret = kvm_read_guest(kvm, gpa, data, len);
+
+        srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+        return ret;
+}
+
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 /*
  * EL2 vectors can be mapped and rerouted in a number of ways,
......
...@@ -29,7 +29,7 @@
 #define KVM_FEATURE_PV_TLB_FLUSH        9
 #define KVM_FEATURE_ASYNC_PF_VMEXIT     10

-#define KVM_HINTS_DEDICATED             0
+#define KVM_HINTS_REALTIME              0

 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
......
...@@ -457,7 +457,7 @@ static void __init sev_map_percpu_data(void)
 static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
 {
         native_smp_prepare_cpus(max_cpus);
-        if (kvm_para_has_hint(KVM_HINTS_DEDICATED))
+        if (kvm_para_has_hint(KVM_HINTS_REALTIME))
                 static_branch_disable(&virt_spin_lock_key);
 }
...@@ -553,7 +553,7 @@ static void __init kvm_guest_init(void)
         }

         if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-            !kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
+            !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
             kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
                 pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
...@@ -649,7 +649,7 @@ static __init int kvm_setup_pv_tlb_flush(void)
         int cpu;

         if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
-            !kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
+            !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
             kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                 for_each_possible_cpu(cpu) {
                         zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
...@@ -745,7 +745,7 @@ void __init kvm_spinlock_init(void)
         if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                 return;

-        if (kvm_para_has_hint(KVM_HINTS_DEDICATED))
+        if (kvm_para_has_hint(KVM_HINTS_REALTIME))
                 return;

         __pv_init_lock_hash();
......
...@@ -1265,7 +1265,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
         struct kvm_run *run = vcpu->run;

         kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
-        return 1;
+        return kvm_skip_emulated_instruction(vcpu);
 }

 static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
...@@ -1296,8 +1296,10 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
         if (param & ~KVM_HYPERV_CONN_ID_MASK)
                 return HV_STATUS_INVALID_HYPERCALL_INPUT;

-        /* conn_to_evt is protected by vcpu->kvm->srcu */
+        /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
+        rcu_read_lock();
         eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
+        rcu_read_unlock();
         if (!eventfd)
                 return HV_STATUS_INVALID_PORT_ID;
......
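
A note on the idr_find fix above (general background, not code from this merge): idr_find() walks an RCU-managed radix tree, so a caller that is not already inside an RCU or SRCU read-side critical section must bracket the lookup itself; here the eventfd's lifetime is covered by kvm->srcu, but the idr lookup was not. A minimal, generic sketch of the pattern, with hypothetical names:

/*
 * Generic sketch of the idr-under-RCU pattern (hypothetical names, not the
 * KVM code).  Writers serialize insertion with a spinlock; readers only need
 * rcu_read_lock() around idr_find() because the idr's internal tree is
 * RCU-managed.  A real reader must also ensure the returned object stays
 * alive after rcu_read_unlock(), e.g. via a refcount or, as in the hunk
 * above, an outer SRCU section.
 */
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_idr_lock);

static int example_insert(void *obj)
{
        int id;

        idr_preload(GFP_KERNEL);
        spin_lock(&example_idr_lock);
        id = idr_alloc(&example_idr, obj, 0, 0, GFP_NOWAIT);
        spin_unlock(&example_idr_lock);
        idr_preload_end();

        return id;
}

static void *example_lookup(int id)
{
        void *obj;

        rcu_read_lock();
        obj = idr_find(&example_idr, id);
        rcu_read_unlock();

        return obj;
}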
...@@ -1494,6 +1494,12 @@ static inline bool cpu_has_vmx_vmfunc(void)
                 SECONDARY_EXEC_ENABLE_VMFUNC;
 }

+static bool vmx_umip_emulated(void)
+{
+        return vmcs_config.cpu_based_2nd_exec_ctrl &
+                SECONDARY_EXEC_DESC;
+}
+
 static inline bool report_flexpriority(void)
 {
         return flexpriority_enabled;
...@@ -4761,14 +4767,16 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
         else
                 hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;

-        if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) {
-                vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
-                              SECONDARY_EXEC_DESC);
-                hw_cr4 &= ~X86_CR4_UMIP;
-        } else if (!is_guest_mode(vcpu) ||
-                   !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
-                vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
-                                SECONDARY_EXEC_DESC);
+        if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
+                if (cr4 & X86_CR4_UMIP) {
+                        vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+                                      SECONDARY_EXEC_DESC);
+                        hw_cr4 &= ~X86_CR4_UMIP;
+                } else if (!is_guest_mode(vcpu) ||
+                           !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
+                        vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+                                        SECONDARY_EXEC_DESC);
+        }

         if (cr4 & X86_CR4_VMXE) {
                 /*
...@@ -9497,12 +9505,6 @@ static bool vmx_xsaves_supported(void)
                 SECONDARY_EXEC_XSAVES;
 }

-static bool vmx_umip_emulated(void)
-{
-        return vmcs_config.cpu_based_2nd_exec_ctrl &
-                SECONDARY_EXEC_DESC;
-}
-
 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 {
         u32 exit_intr_info;
......
...@@ -114,7 +114,7 @@ module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
 static bool __read_mostly report_ignored_msrs = true;
 module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);

-unsigned int min_timer_period_us = 500;
+unsigned int min_timer_period_us = 200;
 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

 static bool __read_mostly kvmclock_periodic_sync = true;
...@@ -843,7 +843,10 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
 #ifdef CONFIG_X86_64
-        cr3 &= ~CR3_PCID_INVD;
+        bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+
+        if (pcid_enabled)
+                cr3 &= ~CR3_PCID_INVD;
 #endif

         if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
...@@ -6671,12 +6674,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
         unsigned long nr, a0, a1, a2, a3, ret;
-        int op_64_bit, r;
-
-        r = kvm_skip_emulated_instruction(vcpu);
+        int op_64_bit;

-        if (kvm_hv_hypercall_enabled(vcpu->kvm))
-                return kvm_hv_hypercall(vcpu);
+        if (kvm_hv_hypercall_enabled(vcpu->kvm)) {
+                if (!kvm_hv_hypercall(vcpu))
+                        return 0;
+                goto out;
+        }

         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
...@@ -6697,7 +6701,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
         if (kvm_x86_ops->get_cpl(vcpu) != 0) {
                 ret = -KVM_EPERM;
-                goto out;
+                goto out_error;
         }

         switch (nr) {
...@@ -6717,12 +6721,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
                 ret = -KVM_ENOSYS;
                 break;
         }
-out:
+out_error:
         if (!op_64_bit)
                 ret = (u32)ret;
         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+
+out:
         ++vcpu->stat.hypercalls;
-        return r;
+        return kvm_skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
......
...@@ -1045,13 +1045,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

-#ifdef CONFIG_S390
-#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
-#elif defined(CONFIG_ARM64)
-#define KVM_MAX_IRQ_ROUTES 4096
-#else
-#define KVM_MAX_IRQ_ROUTES 1024
-#endif
+#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

 bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
 int kvm_set_irq_routing(struct kvm *kvm,
......
...@@ -15,7 +15,7 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/

-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D)
+CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..

 # After inclusion, $(OUTPUT) is defined and
 # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
......
...@@ -19,6 +19,7 @@
 #include <errno.h>
 #include <unistd.h>
 #include <fcntl.h>
+#include "kselftest.h"

 ssize_t test_write(int fd, const void *buf, size_t count);
 ssize_t test_read(int fd, void *buf, size_t count);
......
...@@ -50,8 +50,8 @@ int kvm_check_cap(long cap)
         int kvm_fd;

         kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-        TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i",
-                KVM_DEV_PATH, kvm_fd, errno);
+        if (kvm_fd < 0)
+                exit(KSFT_SKIP);

         ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
         TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
...@@ -91,8 +91,8 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
         vm->mode = mode;

         kvm_fd = open(KVM_DEV_PATH, perm);
-        TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i",
-                KVM_DEV_PATH, kvm_fd, errno);
+        if (kvm_fd < 0)
+                exit(KSFT_SKIP);

         /* Create VM. */
         vm->fd = ioctl(kvm_fd, KVM_CREATE_VM, NULL);
...@@ -418,8 +418,8 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
         cpuid = allocate_kvm_cpuid2();

         kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
-        TEST_ASSERT(kvm_fd >= 0, "open %s failed, rc: %i errno: %i",
-                KVM_DEV_PATH, kvm_fd, errno);
+        if (kvm_fd < 0)
+                exit(KSFT_SKIP);

         ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
         TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
...@@ -675,8 +675,8 @@ static int vcpu_mmap_sz(void)
         int dev_fd, ret;

         dev_fd = open(KVM_DEV_PATH, O_RDONLY);
-        TEST_ASSERT(dev_fd >= 0, "%s open %s failed, rc: %i errno: %i",
-                __func__, KVM_DEV_PATH, dev_fd, errno);
+        if (dev_fd < 0)
+                exit(KSFT_SKIP);

         ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
         TEST_ASSERT(ret >= sizeof(struct kvm_run),
......
...@@ -85,6 +85,9 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left,
 {
 }

+#define TEST_SYNC_FIELDS   (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
+#define INVALID_SYNC_FIELD 0x80000000
+
 int main(int argc, char *argv[])
 {
         struct kvm_vm *vm;
...@@ -98,9 +101,14 @@ int main(int argc, char *argv[])
         setbuf(stdout, NULL);

         cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
-        TEST_ASSERT((unsigned long)cap == KVM_SYNC_X86_VALID_FIELDS,
-                    "KVM_CAP_SYNC_REGS (0x%x) != KVM_SYNC_X86_VALID_FIELDS (0x%lx)\n",
-                    cap, KVM_SYNC_X86_VALID_FIELDS);
+        if ((cap & TEST_SYNC_FIELDS) != TEST_SYNC_FIELDS) {
+                fprintf(stderr, "KVM_CAP_SYNC_REGS not supported, skipping test\n");
+                exit(KSFT_SKIP);
+        }
+        if ((cap & INVALID_SYNC_FIELD) != 0) {
+                fprintf(stderr, "The \"invalid\" field is not invalid, skipping test\n");
+                exit(KSFT_SKIP);
+        }

         /* Create VM */
         vm = vm_create_default(VCPU_ID, guest_code);
...@@ -108,7 +116,14 @@ int main(int argc, char *argv[])
         run = vcpu_state(vm, VCPU_ID);

         /* Request reading invalid register set from VCPU. */
-        run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS << 1;
+        run->kvm_valid_regs = INVALID_SYNC_FIELD;
+        rv = _vcpu_run(vm, VCPU_ID);
+        TEST_ASSERT(rv < 0 && errno == EINVAL,
+                    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
+                    rv);
+        vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;
+
+        run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
         rv = _vcpu_run(vm, VCPU_ID);
         TEST_ASSERT(rv < 0 && errno == EINVAL,
                     "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d\n",
...@@ -116,7 +131,14 @@ int main(int argc, char *argv[])
         vcpu_state(vm, VCPU_ID)->kvm_valid_regs = 0;

         /* Request setting invalid register set into VCPU. */
-        run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS << 1;
+        run->kvm_dirty_regs = INVALID_SYNC_FIELD;
+        rv = _vcpu_run(vm, VCPU_ID);
+        TEST_ASSERT(rv < 0 && errno == EINVAL,
+                    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
+                    rv);
+        vcpu_state(vm, VCPU_ID)->kvm_dirty_regs = 0;
+
+        run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
         rv = _vcpu_run(vm, VCPU_ID);
         TEST_ASSERT(rv < 0 && errno == EINVAL,
                     "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d\n",
...@@ -125,7 +147,7 @@ int main(int argc, char *argv[])
         /* Request and verify all valid register sets. */
         /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
-        run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS;
+        run->kvm_valid_regs = TEST_SYNC_FIELDS;
         rv = _vcpu_run(vm, VCPU_ID);
         TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                     "Unexpected exit reason: %u (%s),\n",
...@@ -146,7 +168,7 @@ int main(int argc, char *argv[])
         run->s.regs.sregs.apic_base = 1 << 11;
         /* TODO run->s.regs.events.XYZ = ABC; */

-        run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS;
+        run->kvm_valid_regs = TEST_SYNC_FIELDS;
         run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
         rv = _vcpu_run(vm, VCPU_ID);
         TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
...@@ -172,7 +194,7 @@ int main(int argc, char *argv[])
         /* Clear kvm_dirty_regs bits, verify new s.regs values are
          * overwritten with existing guest values.
          */
-        run->kvm_valid_regs = KVM_SYNC_X86_VALID_FIELDS;
+        run->kvm_valid_regs = TEST_SYNC_FIELDS;
         run->kvm_dirty_regs = 0;
         run->s.regs.regs.r11 = 0xDEADBEEF;
         rv = _vcpu_run(vm, VCPU_ID);
...@@ -211,7 +233,7 @@ int main(int argc, char *argv[])
          * with kvm_sync_regs values.
          */
         run->kvm_valid_regs = 0;
-        run->kvm_dirty_regs = KVM_SYNC_X86_VALID_FIELDS;
+        run->kvm_dirty_regs = TEST_SYNC_FIELDS;
         run->s.regs.regs.r11 = 0xBBBB;
         rv = _vcpu_run(vm, VCPU_ID);
         TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
......
...@@ -189,8 +189,8 @@ int main(int argc, char *argv[])
         struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

         if (!(entry->ecx & CPUID_VMX)) {
-                printf("nested VMX not enabled, skipping test");
-                return 0;
+                fprintf(stderr, "nested VMX not enabled, skipping test\n");
+                exit(KSFT_SKIP);
         }

         vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code);
......
...@@ -211,6 +211,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
         struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
         struct vgic_irq *irq;
         struct kvm_vcpu *vcpu = NULL;
+        unsigned long flags;

         if (iter->dist_id == 0) {
                 print_dist_state(s, &kvm->arch.vgic);
...@@ -227,9 +228,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
                 irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
         }

-        spin_lock(&irq->irq_lock);
+        spin_lock_irqsave(&irq->irq_lock, flags);
         print_irq_state(s, irq, vcpu);
-        spin_unlock(&irq->irq_lock);
+        spin_unlock_irqrestore(&irq->irq_lock, flags);

         return 0;
 }
......
...@@ -52,6 +52,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 {
         struct vgic_dist *dist = &kvm->arch.vgic;
         struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
+        unsigned long flags;
         int ret;

         /* In this case there is no put, since we keep the reference. */
...@@ -71,7 +72,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
         irq->intid = intid;
         irq->target_vcpu = vcpu;

-        spin_lock(&dist->lpi_list_lock);
+        spin_lock_irqsave(&dist->lpi_list_lock, flags);

         /*
          * There could be a race with another vgic_add_lpi(), so we need to
...@@ -99,7 +100,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
         dist->lpi_list_count++;

 out_unlock:
-        spin_unlock(&dist->lpi_list_lock);
+        spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

         /*
          * We "cache" the configuration table entries in our struct vgic_irq's.
...@@ -280,8 +281,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
         int ret;
         unsigned long flags;

-        ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
+        ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
                              &prop, 1);

         if (ret)
                 return ret;
...@@ -315,6 +316,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 {
         struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
         struct vgic_irq *irq;
+        unsigned long flags;
         u32 *intids;
         int irq_count, i = 0;
...@@ -330,7 +332,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
         if (!intids)
                 return -ENOMEM;

-        spin_lock(&dist->lpi_list_lock);
+        spin_lock_irqsave(&dist->lpi_list_lock, flags);
         list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                 if (i == irq_count)
                         break;
...@@ -339,7 +341,7 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
                         continue;
                 intids[i++] = irq->intid;
         }
-        spin_unlock(&dist->lpi_list_lock);
+        spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

         *intid_ptr = intids;
         return i;
...@@ -348,10 +350,11 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 {
         int ret = 0;
+        unsigned long flags;

-        spin_lock(&irq->irq_lock);
+        spin_lock_irqsave(&irq->irq_lock, flags);
         irq->target_vcpu = vcpu;
-        spin_unlock(&irq->irq_lock);
+        spin_unlock_irqrestore(&irq->irq_lock, flags);

         if (irq->hw) {
                 struct its_vlpi_map map;
...@@ -441,8 +444,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
                  * this very same byte in the last iteration. Reuse that.
                  */
                 if (byte_offset != last_byte_offset) {
-                        ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
-                                             &pendmask, 1);
+                        ret = kvm_read_guest_lock(vcpu->kvm,
+                                                  pendbase + byte_offset,
+                                                  &pendmask, 1);
                         if (ret) {
                                 kfree(intids);
                                 return ret;
...@@ -786,7 +790,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
                 return false;

         /* Each 1st level entry is represented by a 64-bit value. */
-        if (kvm_read_guest(its->dev->kvm,
+        if (kvm_read_guest_lock(its->dev->kvm,
                            BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
                            &indirect_ptr, sizeof(indirect_ptr)))
                 return false;
...@@ -1367,8 +1371,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
         cbaser = CBASER_ADDRESS(its->cbaser);

         while (its->cwriter != its->creadr) {
-                int ret = kvm_read_guest(kvm, cbaser + its->creadr,
+                int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
                                          cmd_buf, ITS_CMD_SIZE);
                 /*
                  * If kvm_read_guest() fails, this could be due to the guest
                  * programming a bogus value in CBASER or something else going
...@@ -1893,7 +1897,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, int esz,
         int next_offset;
         size_t byte_offset;

-        ret = kvm_read_guest(kvm, gpa, entry, esz);
+        ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
         if (ret)
                 return ret;
...@@ -2263,7 +2267,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
         int ret;

         BUG_ON(esz > sizeof(val));
-        ret = kvm_read_guest(kvm, gpa, &val, esz);
+        ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
         if (ret)
                 return ret;
         val = le64_to_cpu(val);
......
...@@ -344,7 +344,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
         bit_nr = irq->intid % BITS_PER_BYTE;
         ptr = pendbase + byte_offset;

-        ret = kvm_read_guest(kvm, ptr, &val, 1);
+        ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
         if (ret)
                 return ret;
...@@ -397,7 +397,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
                 ptr = pendbase + byte_offset;

                 if (byte_offset != last_byte_offset) {
-                        ret = kvm_read_guest(kvm, ptr, &val, 1);
+                        ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
                         if (ret)
                                 return ret;
                         last_byte_offset = byte_offset;
......
...@@ -43,9 +43,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  * kvm->lock (mutex)
  *   its->cmd_lock (mutex)
  *     its->its_lock (mutex)
- *       vgic_cpu->ap_list_lock
- *         kvm->lpi_list_lock
- *           vgic_irq->irq_lock
+ *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
+ *         kvm->lpi_list_lock		must be taken with IRQs disabled
+ *           vgic_irq->irq_lock		must be taken with IRQs disabled
+ *
+ * As the ap_list_lock might be taken from the timer interrupt handler,
+ * we have to disable IRQs before taking this lock and everything lower
+ * than it.
  *
  * If you need to take multiple locks, always take the upper lock first,
  * then the lower ones, e.g. first take the its_lock, then the irq_lock.
...@@ -72,8 +76,9 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
 {
         struct vgic_dist *dist = &kvm->arch.vgic;
         struct vgic_irq *irq = NULL;
+        unsigned long flags;

-        spin_lock(&dist->lpi_list_lock);
+        spin_lock_irqsave(&dist->lpi_list_lock, flags);

         list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                 if (irq->intid != intid)
...@@ -89,7 +94,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
         irq = NULL;

 out_unlock:
-        spin_unlock(&dist->lpi_list_lock);
+        spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

         return irq;
 }
...@@ -134,19 +139,20 @@ static void vgic_irq_release(struct kref *ref)
 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
 {
         struct vgic_dist *dist = &kvm->arch.vgic;
+        unsigned long flags;

         if (irq->intid < VGIC_MIN_LPI)
                 return;

-        spin_lock(&dist->lpi_list_lock);
+        spin_lock_irqsave(&dist->lpi_list_lock, flags);
         if (!kref_put(&irq->refcount, vgic_irq_release)) {
-                spin_unlock(&dist->lpi_list_lock);
+                spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
                 return;
         };

         list_del(&irq->lpi_list);
         dist->lpi_list_count--;
-        spin_unlock(&dist->lpi_list_lock);
+        spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

         kfree(irq);
 }
......
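
The lock-ordering comment in the vgic.c hunk above captures the rule these VGIC changes enforce: any lock that can also be taken from the timer interrupt handler must be taken with interrupts disabled in process context, otherwise the handler can interrupt a holder on the same CPU and spin forever on the same lock. A generic sketch of the pattern follows (hypothetical names, not vgic code):

/*
 * Illustrative sketch (hypothetical names).  example_lock is taken both from
 * process context and from an interrupt handler, so the process-context side
 * must use spin_lock_irqsave(); the handler already runs with IRQs off on
 * that CPU and can use plain spin_lock().
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_state;

/* Process-context path, e.g. an ioctl handler. */
static void example_update(unsigned long val)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        example_state = val;
        spin_unlock_irqrestore(&example_lock, flags);
}

/* Interrupt-context path. */
static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
        spin_lock(&example_lock);
        example_state++;
        spin_unlock(&example_lock);

        return IRQ_HANDLED;
}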