Commit b2869f28, authored by Gustavo A. R. Silva and committed by Paolo Bonzini

KVM: x86: Mark expected switch fall-throughs

In preparation for enabling -Wimplicit-fallthrough, mark the switch
cases where we expect to fall through.

This patch fixes the following warnings:

arch/x86/kvm/lapic.c:1037:27: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/lapic.c:1876:3: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/hyperv.c:1637:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/svm.c:4396:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/mmu.c:4372:36: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/x86.c:3835:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/x86.c:7938:23: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/vmx/vmx.c:2015:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
arch/x86/kvm/vmx/vmx.c:1773:6: warning: this statement may fall through [-Wimplicit-fallthrough=]

Warning level 3 was used: -Wimplicit-fallthrough=3

This patch is part of the ongoing effort to enable -Wimplicit-fallthrough.
Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 5cd5548f
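
As background, the sketch below (hypothetical file and function names; it is not part of this patch) shows the behavior the patch relies on: with -Wimplicit-fallthrough=3, GCC stays quiet when the comment immediately preceding the next case label matches one of its fall-through regexes, and warns otherwise.

/*
 * fallthrough_demo.c - hypothetical standalone example, not from the
 * kernel tree.  Build with:
 *
 *   gcc -Wimplicit-fallthrough=3 -c fallthrough_demo.c
 */
#include <stdio.h>

void report(int severity)
{
	switch (severity) {
	case 2:
		printf("fatal: ");
		/* fall through - intentional, matches the level-3 regex */
	case 1:
		printf("error\n");
		break;
	/* case 0 has no marker, so GCC warns at its last statement */
	case 0:
		printf("note");
	default:
		printf("\n");
		break;
	}
}

The marked fall-through from case 2 into case 1 compiles cleanly, while the unmarked one from case 0 into default reproduces the "this statement may fall through" diagnostic quoted above. Later kernel releases replaced such comments with the fallthrough; pseudo-keyword, a macro wrapping __attribute__((__fallthrough__)), which -Wimplicit-fallthrough also accepts.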
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1636,7 +1636,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 		ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
 		if (ret != HV_STATUS_INVALID_PORT_ID)
 			break;
-		/* maybe userspace knows this conn_id: fall through */
+		/* fall through - maybe userspace knows this conn_id. */
 	case HVCALL_POST_MESSAGE:
 		/* don't bother userspace if it has no way to handle it */
 		if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1035,6 +1035,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 	switch (delivery_mode) {
 	case APIC_DM_LOWEST:
 		vcpu->arch.apic_arb_prio++;
+		/* fall through */
 	case APIC_DM_FIXED:
 		if (unlikely(trig_mode && !level))
 			break;
@@ -1874,6 +1875,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 	case APIC_LVT0:
 		apic_manage_nmi_watchdog(apic, val);
+		/* fall through */
 	case APIC_LVTTHMR:
 	case APIC_LVTPC:
 	case APIC_LVT1:
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4371,6 +4371,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 			rsvd_bits(maxphyaddr, 51);
 		rsvd_check->rsvd_bits_mask[1][4] =
 			rsvd_check->rsvd_bits_mask[0][4];
+		/* fall through */
 	case PT64_ROOT_4LEVEL:
 		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
 			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4403,7 +4403,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_IA32_APICBASE:
 		if (kvm_vcpu_apicv_active(vcpu))
 			avic_update_vapic_bar(to_svm(vcpu), data);
-		/* Follow through */
+		/* Fall through */
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1773,7 +1773,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!msr_info->host_initiated &&
 		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
 			return 1;
-		/* Otherwise falls through */
+		/* Else, falls through */
 	default:
 		msr = find_msr_entry(vmx, msr_info->index);
 		if (msr) {
@@ -2014,7 +2014,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		/* Check reserved bit, higher 32 bits should be zero */
 		if ((data >> 32) != 0)
 			return 1;
-		/* Otherwise falls through */
+		/* Else, falls through */
 	default:
 		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3834,6 +3834,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 	case KVM_CAP_HYPERV_SYNIC2:
 		if (cap->args[0])
 			return -EINVAL;
+		/* fall through */
+
 	case KVM_CAP_HYPERV_SYNIC:
 		if (!irqchip_in_kernel(vcpu->kvm))
 			return -EINVAL;
@@ -7936,6 +7938,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 			vcpu->arch.pv.pv_unhalted = false;
 			vcpu->arch.mp_state =
 				KVM_MP_STATE_RUNNABLE;
+			/* fall through */
 		case KVM_MP_STATE_RUNNABLE:
 			vcpu->arch.apf.halted = false;
 			break;