diff mbox series

[RFC,v2,76/78] qemu-img.c: add fallthrough pseudo-keyword

Message ID 1fa27adce69e97741ff59dd909f957cf9a5d3525.1697183699.git.manos.pitsidianakis@linaro.org (mailing list archive)
State New, archived
Headers show
Series Strict disable implicit fallthrough | expand

Commit Message

Manos Pitsidianakis Oct. 13, 2023, 7:57 a.m. UTC
In preparation for raising -Wimplicit-fallthrough to 5, replace all
fall-through comments with the fallthrough attribute pseudo-keyword.

Signed-off-by: Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
---
 qemu-img.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff mbox series

Patch

diff --git a/qemu-img.c b/qemu-img.c
index 6068ab0d27..df2457a6fe 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -1870,63 +1870,63 @@  static int coroutine_fn convert_co_read(ImgConvertState *s, int64_t sector_num,
 static int coroutine_fn convert_co_write(ImgConvertState *s, int64_t sector_num,
                                          int nb_sectors, uint8_t *buf,
                                          enum ImgConvertBlockStatus status)
 {
     int ret;
 
     while (nb_sectors > 0) {
         int n = nb_sectors;
         BdrvRequestFlags flags = s->compressed ? BDRV_REQ_WRITE_COMPRESSED : 0;
 
         switch (status) {
         case BLK_BACKING_FILE:
             /* If we have a backing file, leave clusters unallocated that are
              * unallocated in the source image, so that the backing file is
              * visible at the respective offset. */
             assert(s->target_has_backing);
             break;
 
         case BLK_DATA:
             /* If we're told to keep the target fully allocated (-S 0) or there
              * is real non-zero data, we must write it. Otherwise we can treat
              * it as zero sectors.
              * Compressed clusters need to be written as a whole, so in that
              * case we can only save the write if the buffer is completely
              * zeroed. */
             if (!s->min_sparse ||
                 (!s->compressed &&
                  is_allocated_sectors_min(buf, n, &n, s->min_sparse,
                                           sector_num, s->alignment)) ||
                 (s->compressed &&
                  !buffer_is_zero(buf, n * BDRV_SECTOR_SIZE)))
             {
                 ret = blk_co_pwrite(s->target, sector_num << BDRV_SECTOR_BITS,
                                     n << BDRV_SECTOR_BITS, buf, flags);
                 if (ret < 0) {
                     return ret;
                 }
                 break;
             }
-            /* fall-through */
+            fallthrough;
 
         case BLK_ZERO:
             if (s->has_zero_init) {
                 assert(!s->target_has_backing);
                 break;
             }
             ret = blk_co_pwrite_zeroes(s->target,
                                        sector_num << BDRV_SECTOR_BITS,
                                        n << BDRV_SECTOR_BITS,
                                        BDRV_REQ_MAY_UNMAP);
             if (ret < 0) {
                 return ret;
             }
             break;
         }
 
         sector_num += n;
         nb_sectors -= n;
         buf += n * BDRV_SECTOR_SIZE;
     }
 
     return 0;
 }