
[RFC,v2,65/78] hw/nvme: add fallthrough pseudo-keyword

Message ID 19e648431bdf3967c309a17b74fe775d92a72dc9.1697183699.git.manos.pitsidianakis@linaro.org (mailing list archive)
State New, archived
Series Strict disable implicit fallthrough

Commit Message

Manos Pitsidianakis Oct. 13, 2023, 7:57 a.m. UTC
In preparation for raising -Wimplicit-fallthrough to 5, replace all
fall-through comments with the fallthrough attribute pseudo-keyword.

Signed-off-by: Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
---
 hw/nvme/ctrl.c | 24 ++++++++++++------------
 hw/nvme/dif.c  |  4 ++--
 2 files changed, 14 insertions(+), 14 deletions(-)
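
For context, below is a minimal, self-contained sketch of how a
"fallthrough" pseudo-keyword of this kind is commonly defined and used.
The guard and spelling here are illustrative assumptions, not the exact
definition introduced earlier in this series:

/*
 * Illustrative only: one common way to define a "fallthrough"
 * pseudo-keyword.  The __has_attribute shim and macro spelling are
 * assumptions, not the definition this series actually adds.
 */
#include <stdio.h>

#ifndef __has_attribute
# define __has_attribute(x) 0            /* portability shim for old compilers */
#endif

#ifndef fallthrough
# if __has_attribute(fallthrough)
#  define fallthrough __attribute__((fallthrough))
# else
#  define fallthrough do { } while (0)   /* fall through */
# endif
#endif

/*
 * Toy switch: the keyword replaces the old comment, documenting intent
 * in a way -Wimplicit-fallthrough can verify.
 */
static int classify(int state)
{
    int score = 0;

    switch (state) {
    case 2:
        score += 10;
        fallthrough;    /* deliberate: state 2 also counts as state 1 */
    case 1:
        score += 1;
        break;
    default:
        score = -1;
        break;
    }

    return score;
}

int main(void)
{
    printf("%d\n", classify(2));    /* prints 11 */
    return 0;
}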

Patch

diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index f026245d1e..acb2012fb9 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -1912,29 +1912,29 @@  static uint16_t nvme_check_zone_read(NvmeNamespace *ns, uint64_t slba,
 static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone)
 {
     switch (nvme_get_zone_state(zone)) {
     case NVME_ZONE_STATE_FULL:
         return NVME_SUCCESS;
 
     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
         nvme_aor_dec_open(ns);
-        /* fallthrough */
+        fallthrough;
     case NVME_ZONE_STATE_CLOSED:
         nvme_aor_dec_active(ns);
 
         if (zone->d.za & NVME_ZA_ZRWA_VALID) {
             zone->d.za &= ~NVME_ZA_ZRWA_VALID;
             if (ns->params.numzrwa) {
                 ns->zns.numzrwa++;
             }
         }
 
-        /* fallthrough */
+        fallthrough;
     case NVME_ZONE_STATE_EMPTY:
         nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_FULL);
         return NVME_SUCCESS;
 
     default:
         return NVME_ZONE_INVAL_TRANSITION;
     }
 }
@@ -1942,15 +1942,15 @@  static uint16_t nvme_zrm_finish(NvmeNamespace *ns, NvmeZone *zone)
 static uint16_t nvme_zrm_close(NvmeNamespace *ns, NvmeZone *zone)
 {
     switch (nvme_get_zone_state(zone)) {
     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
         nvme_aor_dec_open(ns);
         nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_CLOSED);
-        /* fall through */
+        fallthrough;
     case NVME_ZONE_STATE_CLOSED:
         return NVME_SUCCESS;
 
     default:
         return NVME_ZONE_INVAL_TRANSITION;
     }
 }
@@ -1958,29 +1958,29 @@  static uint16_t nvme_zrm_close(NvmeNamespace *ns, NvmeZone *zone)
 static uint16_t nvme_zrm_reset(NvmeNamespace *ns, NvmeZone *zone)
 {
     switch (nvme_get_zone_state(zone)) {
     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
         nvme_aor_dec_open(ns);
-        /* fallthrough */
+        fallthrough;
     case NVME_ZONE_STATE_CLOSED:
         nvme_aor_dec_active(ns);
 
         if (zone->d.za & NVME_ZA_ZRWA_VALID) {
             if (ns->params.numzrwa) {
                 ns->zns.numzrwa++;
             }
         }
 
-        /* fallthrough */
+        fallthrough;
     case NVME_ZONE_STATE_FULL:
         zone->w_ptr = zone->d.zslba;
         zone->d.wp = zone->w_ptr;
         nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EMPTY);
-        /* fallthrough */
+        fallthrough;
     case NVME_ZONE_STATE_EMPTY:
         return NVME_SUCCESS;
 
     default:
         return NVME_ZONE_INVAL_TRANSITION;
     }
 }
@@ -2010,57 +2010,57 @@  enum {
 static uint16_t nvme_zrm_open_flags(NvmeCtrl *n, NvmeNamespace *ns,
                                     NvmeZone *zone, int flags)
 {
     int act = 0;
     uint16_t status;
 
     switch (nvme_get_zone_state(zone)) {
     case NVME_ZONE_STATE_EMPTY:
         act = 1;
 
-        /* fallthrough */
+        fallthrough;
 
     case NVME_ZONE_STATE_CLOSED:
         if (n->params.auto_transition_zones) {
             nvme_zrm_auto_transition_zone(ns);
         }
         status = nvme_zns_check_resources(ns, act, 1,
                                           (flags & NVME_ZRM_ZRWA) ? 1 : 0);
         if (status) {
             return status;
         }
 
         if (act) {
             nvme_aor_inc_active(ns);
         }
 
         nvme_aor_inc_open(ns);
 
         if (flags & NVME_ZRM_AUTO) {
             nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_IMPLICITLY_OPEN);
             return NVME_SUCCESS;
         }
 
-        /* fallthrough */
+        fallthrough;
 
     case NVME_ZONE_STATE_IMPLICITLY_OPEN:
         if (flags & NVME_ZRM_AUTO) {
             return NVME_SUCCESS;
         }
 
         nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_EXPLICITLY_OPEN);
 
-        /* fallthrough */
+        fallthrough;
 
     case NVME_ZONE_STATE_EXPLICITLY_OPEN:
         if (flags & NVME_ZRM_ZRWA) {
             ns->zns.numzrwa--;
 
             zone->d.za |= NVME_ZA_ZRWA_VALID;
         }
 
         return NVME_SUCCESS;
 
     default:
         return NVME_ZONE_INVAL_TRANSITION;
     }
 }
@@ -3508,135 +3508,135 @@  static void nvme_do_write_fdp(NvmeCtrl *n, NvmeRequest *req, uint64_t slba,
 static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append,
                               bool wrz)
 {
     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
     NvmeNamespace *ns = req->ns;
     uint64_t slba = le64_to_cpu(rw->slba);
     uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
     uint16_t ctrl = le16_to_cpu(rw->control);
     uint8_t prinfo = NVME_RW_PRINFO(ctrl);
     uint64_t data_size = nvme_l2b(ns, nlb);
     uint64_t mapped_size = data_size;
     uint64_t data_offset;
     NvmeZone *zone;
     NvmeZonedResult *res = (NvmeZonedResult *)&req->cqe;
     BlockBackend *blk = ns->blkconf.blk;
     uint16_t status;
 
     if (nvme_ns_ext(ns)) {
         mapped_size += nvme_m2b(ns, nlb);
 
         if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
             bool pract = prinfo & NVME_PRINFO_PRACT;
 
             if (pract && ns->lbaf.ms == nvme_pi_tuple_size(ns)) {
                 mapped_size -= nvme_m2b(ns, nlb);
             }
         }
     }
 
     trace_pci_nvme_write(nvme_cid(req), nvme_io_opc_str(rw->opcode),
                          nvme_nsid(ns), nlb, mapped_size, slba);
 
     if (!wrz) {
         status = nvme_check_mdts(n, mapped_size);
         if (status) {
             goto invalid;
         }
     }
 
     status = nvme_check_bounds(ns, slba, nlb);
     if (status) {
         goto invalid;
     }
 
     if (ns->params.zoned) {
         zone = nvme_get_zone_by_slba(ns, slba);
         assert(zone);
 
         if (append) {
             bool piremap = !!(ctrl & NVME_RW_PIREMAP);
 
             if (unlikely(zone->d.za & NVME_ZA_ZRWA_VALID)) {
                 return NVME_INVALID_ZONE_OP | NVME_DNR;
             }
 
             if (unlikely(slba != zone->d.zslba)) {
                 trace_pci_nvme_err_append_not_at_start(slba, zone->d.zslba);
                 status = NVME_INVALID_FIELD;
                 goto invalid;
             }
 
             if (n->params.zasl &&
                 data_size > (uint64_t)n->page_size << n->params.zasl) {
                 trace_pci_nvme_err_zasl(data_size);
                 return NVME_INVALID_FIELD | NVME_DNR;
             }
 
             slba = zone->w_ptr;
             rw->slba = cpu_to_le64(slba);
             res->slba = cpu_to_le64(slba);
 
             switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
             case NVME_ID_NS_DPS_TYPE_1:
                 if (!piremap) {
                     return NVME_INVALID_PROT_INFO | NVME_DNR;
                 }
 
-                /* fallthrough */
+                fallthrough;
 
             case NVME_ID_NS_DPS_TYPE_2:
                 if (piremap) {
                     uint32_t reftag = le32_to_cpu(rw->reftag);
                     rw->reftag = cpu_to_le32(reftag + (slba - zone->d.zslba));
                 }
 
                 break;
 
             case NVME_ID_NS_DPS_TYPE_3:
                 if (piremap) {
                     return NVME_INVALID_PROT_INFO | NVME_DNR;
                 }
 
                 break;
             }
         }
 
         status = nvme_check_zone_write(ns, zone, slba, nlb);
         if (status) {
             goto invalid;
         }
 
         status = nvme_zrm_auto(n, ns, zone);
         if (status) {
             goto invalid;
         }
 
         if (!(zone->d.za & NVME_ZA_ZRWA_VALID)) {
             zone->w_ptr += nlb;
         }
     } else if (ns->endgrp && ns->endgrp->fdp.enabled) {
         nvme_do_write_fdp(n, req, slba, nlb);
     }
 
     data_offset = nvme_l2b(ns, slba);
 
     if (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
         return nvme_dif_rw(n, req);
     }
 
     if (!wrz) {
         status = nvme_map_data(n, nlb, req);
         if (status) {
             goto invalid;
         }
 
         block_acct_start(blk_get_stats(blk), &req->acct, data_size,
                          BLOCK_ACCT_WRITE);
         nvme_blk_write(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req);
     } else {
         req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size,
                                            BDRV_REQ_MAY_UNMAP, nvme_rw_cb,
                                            req);
     }
 
     return NVME_NO_COMPLETE;
@@ -3734,13 +3734,13 @@  static uint16_t nvme_finish_zone(NvmeNamespace *ns, NvmeZone *zone,
 static uint16_t nvme_offline_zone(NvmeNamespace *ns, NvmeZone *zone,
                                   NvmeZoneState state, NvmeRequest *req)
 {
     switch (state) {
     case NVME_ZONE_STATE_READ_ONLY:
         nvme_assign_zone_state(ns, zone, NVME_ZONE_STATE_OFFLINE);
-        /* fall through */
+        fallthrough;
     case NVME_ZONE_STATE_OFFLINE:
         return NVME_SUCCESS;
     default:
         return NVME_ZONE_INVAL_TRANSITION;
     }
 }
@@ -4902,39 +4902,39 @@  static uint16_t nvme_changed_nslist(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
 static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint8_t csi, uint32_t buf_len,
                                  uint64_t off, NvmeRequest *req)
 {
     NvmeEffectsLog log = {};
     const uint32_t *src_iocs = NULL;
     uint32_t trans_len;
 
     if (off >= sizeof(log)) {
         trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log));
         return NVME_INVALID_FIELD | NVME_DNR;
     }
 
     switch (NVME_CC_CSS(ldl_le_p(&n->bar.cc))) {
     case NVME_CC_CSS_NVM:
         src_iocs = nvme_cse_iocs_nvm;
-        /* fall through */
+        fallthrough;
     case NVME_CC_CSS_ADMIN_ONLY:
         break;
     case NVME_CC_CSS_CSI:
         switch (csi) {
         case NVME_CSI_NVM:
             src_iocs = nvme_cse_iocs_nvm;
             break;
         case NVME_CSI_ZONED:
             src_iocs = nvme_cse_iocs_zoned;
             break;
         }
     }
 
     memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs));
 
     if (src_iocs) {
         memcpy(log.iocs, src_iocs, sizeof(log.iocs));
     }
 
     trans_len = MIN(sizeof(log) - off, buf_len);
 
     return nvme_c2h(n, ((uint8_t *)&log) + off, trans_len, req);
 }
diff --git a/hw/nvme/dif.c b/hw/nvme/dif.c
index 01b19c3373..00dd96bdb3 100644
--- a/hw/nvme/dif.c
+++ b/hw/nvme/dif.c
@@ -153,58 +153,58 @@  void nvme_dif_pract_generate_dif(NvmeNamespace *ns, uint8_t *buf, size_t len,
 static uint16_t nvme_dif_prchk_crc16(NvmeNamespace *ns, NvmeDifTuple *dif,
                                      uint8_t *buf, uint8_t *mbuf, size_t pil,
                                      uint8_t prinfo, uint16_t apptag,
                                      uint16_t appmask, uint64_t reftag)
 {
     switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
     case NVME_ID_NS_DPS_TYPE_3:
         if (be32_to_cpu(dif->g16.reftag) != 0xffffffff) {
             break;
         }
 
-        /* fallthrough */
+        fallthrough;
     case NVME_ID_NS_DPS_TYPE_1:
     case NVME_ID_NS_DPS_TYPE_2:
         if (be16_to_cpu(dif->g16.apptag) != 0xffff) {
             break;
         }
 
         trace_pci_nvme_dif_prchk_disabled_crc16(be16_to_cpu(dif->g16.apptag),
                                                 be32_to_cpu(dif->g16.reftag));
 
         return NVME_SUCCESS;
     }
 
     if (prinfo & NVME_PRINFO_PRCHK_GUARD) {
         uint16_t crc = crc16_t10dif(0x0, buf, ns->lbasz);
 
         if (pil) {
             crc = crc16_t10dif(crc, mbuf, pil);
         }
 
         trace_pci_nvme_dif_prchk_guard_crc16(be16_to_cpu(dif->g16.guard), crc);
 
         if (be16_to_cpu(dif->g16.guard) != crc) {
             return NVME_E2E_GUARD_ERROR;
         }
     }
 
     if (prinfo & NVME_PRINFO_PRCHK_APP) {
         trace_pci_nvme_dif_prchk_apptag(be16_to_cpu(dif->g16.apptag), apptag,
                                         appmask);
 
         if ((be16_to_cpu(dif->g16.apptag) & appmask) != (apptag & appmask)) {
             return NVME_E2E_APP_ERROR;
         }
     }
 
     if (prinfo & NVME_PRINFO_PRCHK_REF) {
         trace_pci_nvme_dif_prchk_reftag_crc16(be32_to_cpu(dif->g16.reftag),
                                               reftag);
 
         if (be32_to_cpu(dif->g16.reftag) != reftag) {
             return NVME_E2E_REF_ERROR;
         }
     }
 
     return NVME_SUCCESS;
 }
@@ -212,66 +212,66 @@  static uint16_t nvme_dif_prchk_crc16(NvmeNamespace *ns, NvmeDifTuple *dif,
 static uint16_t nvme_dif_prchk_crc64(NvmeNamespace *ns, NvmeDifTuple *dif,
                                      uint8_t *buf, uint8_t *mbuf, size_t pil,
                                      uint8_t prinfo, uint16_t apptag,
                                      uint16_t appmask, uint64_t reftag)
 {
     uint64_t r = 0;
 
     r |= (uint64_t)dif->g64.sr[0] << 40;
     r |= (uint64_t)dif->g64.sr[1] << 32;
     r |= (uint64_t)dif->g64.sr[2] << 24;
     r |= (uint64_t)dif->g64.sr[3] << 16;
     r |= (uint64_t)dif->g64.sr[4] << 8;
     r |= (uint64_t)dif->g64.sr[5];
 
     switch (NVME_ID_NS_DPS_TYPE(ns->id_ns.dps)) {
     case NVME_ID_NS_DPS_TYPE_3:
         if (r != 0xffffffffffff) {
             break;
         }
 
-        /* fallthrough */
+        fallthrough;
     case NVME_ID_NS_DPS_TYPE_1:
     case NVME_ID_NS_DPS_TYPE_2:
         if (be16_to_cpu(dif->g64.apptag) != 0xffff) {
             break;
         }
 
         trace_pci_nvme_dif_prchk_disabled_crc64(be16_to_cpu(dif->g16.apptag),
                                                 r);
 
         return NVME_SUCCESS;
     }
 
     if (prinfo & NVME_PRINFO_PRCHK_GUARD) {
         uint64_t crc = crc64_nvme(~0ULL, buf, ns->lbasz);
 
         if (pil) {
             crc = crc64_nvme(~crc, mbuf, pil);
         }
 
         trace_pci_nvme_dif_prchk_guard_crc64(be64_to_cpu(dif->g64.guard), crc);
 
         if (be64_to_cpu(dif->g64.guard) != crc) {
             return NVME_E2E_GUARD_ERROR;
         }
     }
 
     if (prinfo & NVME_PRINFO_PRCHK_APP) {
         trace_pci_nvme_dif_prchk_apptag(be16_to_cpu(dif->g64.apptag), apptag,
                                         appmask);
 
         if ((be16_to_cpu(dif->g64.apptag) & appmask) != (apptag & appmask)) {
             return NVME_E2E_APP_ERROR;
         }
     }
 
     if (prinfo & NVME_PRINFO_PRCHK_REF) {
         trace_pci_nvme_dif_prchk_reftag_crc64(r, reftag);
 
         if (r != reftag) {
             return NVME_E2E_REF_ERROR;
         }
     }
 
     return NVME_SUCCESS;
 }