--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -339,7 +339,7 @@ static int hvmemul_do_io(
}
case X86EMUL_UNIMPLEMENTED:
ASSERT_UNREACHABLE();
- /* Fall-through */
+ fallthrough;
default:
BUG();
}
@@ -397,7 +397,6 @@ static int hvmemul_acquire_page(unsigned long gmfn, struct page_info **page)
default:
ASSERT_UNREACHABLE();
/* Fallthrough */
-
case -EINVAL:
return X86EMUL_UNHANDLEABLE;
}
@@ -2674,6 +2673,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
default:
ASSERT_UNREACHABLE();
+ break;
}
if ( hvmemul_ctxt->ctxt.retire.singlestep )
@@ -2764,6 +2764,7 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
/* fallthrough */
default:
hvm_emulate_writeback(&ctxt);
+ break;
}
return rc;
@@ -2799,10 +2800,11 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
memcpy(hvio->mmio_insn, curr->arch.vm_event->emul.insn.data,
hvio->mmio_insn_bytes);
}
- /* Fall-through */
+ fallthrough;
default:
ctx.set_context = (kind == EMUL_KIND_SET_CONTEXT_DATA);
rc = hvm_emulate_one(&ctx, VIO_no_completion);
+ break;
}
switch ( rc )
@@ -2818,7 +2820,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
case X86EMUL_UNIMPLEMENTED:
if ( hvm_monitor_emul_unimplemented() )
return;
- /* fall-through */
+ fallthrough;
case X86EMUL_UNHANDLEABLE:
hvm_dump_emulation_state(XENLOG_G_DEBUG, "Mem event", &ctx, rc);
hvm_inject_hw_exception(trapnr, errcode);
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4919,6 +4919,8 @@ static int do_altp2m_op(
default:
ASSERT_UNREACHABLE();
+ rc = -EOPNOTSUPP;
+ break;
}
out:
@@ -5020,6 +5022,8 @@ static int compat_altp2m_op(
default:
ASSERT_UNREACHABLE();
+ rc = -EOPNOTSUPP;
+ break;
}
return rc;
@@ -5283,6 +5287,7 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
* %cs and %tr are unconditionally present. SVM ignores these present
* bits and will happily run without them set.
*/
+ fallthrough;
case x86_seg_cs:
reg->p = 1;
break;
--- a/xen/arch/x86/hvm/hypercall.c
+++ b/xen/arch/x86/hvm/hypercall.c
@@ -110,7 +110,7 @@ int hvm_hypercall(struct cpu_user_regs *regs)
{
case 8:
eax = regs->rax;
- /* Fallthrough to permission check. */
+ fallthrough;
case 4:
case 2:
if ( currd->arch.monitor.guest_request_userspace_enabled &&
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -282,6 +282,7 @@ static void hvm_set_callback_irq_level(struct vcpu *v)
__hvm_pci_intx_assert(d, pdev, pintx);
else
__hvm_pci_intx_deassert(d, pdev, pintx);
+ break;
default:
break;
}
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -185,6 +185,7 @@ static int cf_check handle_evt_io(
gdprintk(XENLOG_WARNING,
"Bad ACPI PM register write: %x bytes (%x) at %x\n",
bytes, *val, port);
+ break;
}
}
/* Fix up the SCI state to match the new register state */
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -681,6 +681,7 @@ static void cf_check svm_get_segment_register(
ASSERT_UNREACHABLE();
domain_crash(v->domain);
*reg = (struct segment_register){};
+ break;
}
}
@@ -2416,6 +2417,7 @@ static void cf_check svm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
printk(XENLOG_G_ERR "%s(%pv, 0x%08x, 0x%016"PRIx64") Bad register\n",
__func__, v, reg, val);
domain_crash(d);
+ break;
}
}
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -367,6 +367,7 @@ static void vlapic_accept_irq(struct vcpu *v, uint32_t icr_low)
gdprintk(XENLOG_ERR, "TODO: unsupported delivery mode in ICR %x\n",
icr_low);
domain_crash(v->domain);
+ break;
}
}
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1446,6 +1446,7 @@ struct vmx_msr_entry *vmx_find_msr(const struct vcpu *v, uint32_t msr,
default:
ASSERT_UNREACHABLE();
+ return NULL;
}
if ( !start )
@@ -1598,6 +1599,7 @@ int vmx_del_msr(struct vcpu *v, uint32_t msr, enum vmx_msr_list_type type)
default:
ASSERT_UNREACHABLE();
+ return -EINVAL;
}
if ( !start )
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2745,6 +2745,7 @@ static void cf_check vmx_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
printk(XENLOG_G_ERR "%s(%pv, 0x%08x, 0x%016"PRIx64") Bad register\n",
__func__, v, reg, val);
domain_crash(d);
+ break;
}
vmx_vmcs_exit(v);
}
@@ -3393,7 +3394,7 @@ static int cf_check vmx_msr_read_intercept(
*msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
/* Perhaps vpmu will change some bits. */
- /* FALLTHROUGH */
+ fallthrough;
case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -2768,6 +2768,7 @@ int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
gprintk(XENLOG_ERR, "Unhandled nested vmexit: reason %u\n",
exit_reason);
domain_crash(v->domain);
+ break;
}
return ( nvcpu->nv_vmexit_pending == 1 );
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -309,6 +309,7 @@ static void vpic_ioport_write(
if ( !(vpic->init_state & 8) )
break; /* CASCADE mode: wait for write to ICW3. */
/* SNGL mode: fall through (no ICW3). */
+ fallthrough;
case 2:
/* ICW3 */
vpic->init_state++;
--- a/xen/arch/x86/hvm/vpt.c
+++ b/xen/arch/x86/hvm/vpt.c
@@ -119,8 +119,7 @@ static int pt_irq_masked(struct periodic_time *pt)
gsi = hvm_isa_irq_to_gsi(pt->irq);
}
-
- /* Fallthrough to check if the interrupt is masked on the IO APIC. */
+ fallthrough;
case PTSRC_ioapic:
{
int mask = vioapic_get_mask(v->domain, gsi);
MISRA C Rule 16.3 states that "An unconditional `break' statement shall
terminate every switch-clause".  Add the pseudo-keyword fallthrough or the
missing break statement to address violations of the rule.  As a defensive
measure, return an error code or a null pointer in case supposedly
unreachable code is actually reached.

Signed-off-by: Federico Serafini <federico.serafini@bugseng.com>
---
Changes in v4:
- do not separate different parts of HVM:
  a) squash patches 8, 11 and 12 of v3 into this patch;
  b) also address violations in SVM and VMX;
- re-arrange fallthrough positioning to comply with Coverity.
Changes in v3:
- squashed here modifications of pmtimer.c;
- no blank line after fallthrough;
- better indentation of fallthrough.
---
 xen/arch/x86/hvm/emulate.c   | 10 ++++++----
 xen/arch/x86/hvm/hvm.c       |  5 +++++
 xen/arch/x86/hvm/hypercall.c |  2 +-
 xen/arch/x86/hvm/irq.c       |  1 +
 xen/arch/x86/hvm/pmtimer.c   |  1 +
 xen/arch/x86/hvm/svm/svm.c   |  2 ++
 xen/arch/x86/hvm/vlapic.c    |  1 +
 xen/arch/x86/hvm/vmx/vmcs.c  |  2 ++
 xen/arch/x86/hvm/vmx/vmx.c   |  3 ++-
 xen/arch/x86/hvm/vmx/vvmx.c  |  1 +
 xen/arch/x86/hvm/vpic.c      |  1 +
 xen/arch/x86/hvm/vpt.c       |  3 +--
 12 files changed, 24 insertions(+), 8 deletions(-)
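
As an aside for reviewers: the sketch below is illustrative only, not part of
the patch.  The function, values, and simplified macro definitions are made
up; the macros merely approximate the shape of Xen's fallthrough
pseudo-keyword and ASSERT_UNREACHABLE(), which come from the common headers.
It condenses the three transformations applied throughout, in a snippet that
compiles on its own:

/* Illustrative stand-ins only; not the Xen definitions. */
#include <assert.h>
#include <errno.h>

#if defined(__GNUC__) && (__GNUC__ >= 7)
# define fallthrough __attribute__((__fallthrough__))
#else
# define fallthrough do {} while ( 0 ) /* fallthrough */
#endif
#define ASSERT_UNREACHABLE() assert(0)

static int classify(unsigned int op, int *out)
{
    switch ( op )
    {
    case 0:
        *out = 0;
        fallthrough;      /* 1: intended fall-through made explicit */
    case 1:
        *out += 1;
        break;            /* 2: unconditional break terminates the clause */
    default:
        ASSERT_UNREACHABLE();
        return -EINVAL;   /* 3: defensive error path if ever reached */
    }

    return 0;
}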