
[libnbd,09/13] block_status: Accept 64-bit extents during block status

Message ID 20211203231741.3901263-10-eblake@redhat.com (mailing list archive)
State New, archived
Series libnbd patches for NBD_OPT_EXTENDED_HEADERS

Commit Message

Eric Blake Dec. 3, 2021, 11:17 p.m. UTC
Support a server giving us a 64-bit extent.  Note that the protocol
says a server should not give a 64-bit answer when extended headers
are not negotiated, but since the length in the client's request is
merely a hint, it is possible for a server to have a 64-bit answer
even when the original query was only 32 bits.  At any rate, it is
just as easy for us to always support the new chunk type as it is to
complain when a server uses it incorrectly, and the user's 32-bit
callback doesn't have to care which size the server's result used
(either the result fits in 32 bits, or our shim silently truncates
it, but the user still makes progress).  Of course, until a later
patch enables extended headers negotiation, no compliant server will
trigger the new code here.
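
For reference, a rough sketch of the descriptor layouts in play.  The
declarations below are paraphrased from the field names the patch
uses; the real declarations live in the libnbd headers and may differ
in ordering or attributes.

  /* One NBD_REPLY_TYPE_BLOCK_STATUS descriptor on the wire: 8 bytes,
   * handled in the code as a bare array of big-endian uint32_t values
   * (raw[2*i] is the extent length, raw[2*i+1] the status flags). */

  /* One NBD_REPLY_TYPE_BLOCK_STATUS_EXT descriptor on the wire: 16
   * bytes, matching struct nbd_block_descriptor_ext used below. */
  struct nbd_block_descriptor_ext {
    uint64_t length;
    uint32_t status_flags;
    uint32_t pad;              /* must be zero */
  };

  /* libnbd's preferred internal form, handed to the user's callback:
   * 12 bytes of data, possibly padded to 16 bytes by the ABI. */
  typedef struct {
    uint64_t length;
    uint32_t flags;
  } nbd_extent;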

Implementation-wise, it does not matter whether we are narrowing from
the server's 16-byte extent (including explicit padding) into a
12-byte struct, or whether our 'nbd_extent' type has implicit padding
and is thus also 16 bytes; either way, the forward order of our
byte-swapping traversal is safe.
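
To make that aliasing argument concrete, here is a hypothetical
standalone sketch (the helper names and stand-in structs are mine,
not part of the patch) of why the 32-bit branch below must walk
backwards while the 64-bit branch can walk forwards when converting
inside one shared buffer; strict-aliasing niceties are glossed over
for brevity.

  #include <stddef.h>
  #include <stdint.h>

  /* Stand-ins for an 8-byte wire descriptor and a 16-byte wire
   * descriptor that share one buffer with the converted form. */
  struct narrow { uint32_t length; uint32_t flags; };               /* 8 bytes  */
  struct wide   { uint64_t length; uint32_t flags; uint32_t pad; }; /* 16 bytes */

  /* Widening in place: destination entry i starts past source entry i
   * for every i > 0, so a forward pass would overwrite source entries
   * before reading them; reverse order is required.  The buffer must
   * already be large enough for the wider form, as the patch ensures
   * with MAX(). */
  void
  widen_in_place (void *buf, size_t count)
  {
    const struct narrow *src = buf;
    struct wide *dst = buf;
    for (size_t i = count; i > 0; ) {
      --i;
      struct narrow n = src[i];  /* copy out before the slot is clobbered */
      dst[i] = (struct wide) { .length = n.length, .flags = n.flags };
    }
  }

  /* Narrowing (or equal-size) conversion in place: each write lands at
   * or before the bytes it came from and never reaches a source entry
   * that has not been read yet, so a forward pass is safe. */
  void
  narrow_in_place (void *buf, size_t count)
  {
    const struct wide *src = buf;
    struct narrow *dst = buf;
    for (size_t i = 0; i < count; i++) {
      struct wide w = src[i];
      dst[i] = (struct narrow) { .length = (uint32_t) w.length,
                                 .flags = w.flags };
    }
  }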
---
 lib/internal.h                      |  1 +
 generator/states-reply-structured.c | 75 +++++++++++++++++++++++------
 2 files changed, 60 insertions(+), 16 deletions(-)

Patch

diff --git a/lib/internal.h b/lib/internal.h
index 4800df83..97abf4f2 100644
--- a/lib/internal.h
+++ b/lib/internal.h
@@ -289,6 +289,7 @@  struct nbd_handle {
   union {
     nbd_extent *normal; /* Our 64-bit preferred internal form */
     uint32_t *narrow;   /* 32-bit form of NBD_REPLY_TYPE_BLOCK_STATUS */
+    struct nbd_block_descriptor_ext *wide; /* NBD_REPLY_TYPE_BLOCK_STATUS_EXT */
   } bs_entries;

   /* Commands which are waiting to be issued [meaning the request
diff --git a/generator/states-reply-structured.c b/generator/states-reply-structured.c
index 71c761e9..29b1c3d8 100644
--- a/generator/states-reply-structured.c
+++ b/generator/states-reply-structured.c
@@ -22,6 +22,8 @@ 
 #include <stdint.h>
 #include <inttypes.h>

+#include "minmax.h"
+
 /* Structured reply must be completely inside the bounds of the
  * requesting command.
  */
@@ -202,7 +204,8 @@  STATE_MACHINE {
     SET_NEXT_STATE (%RECV_OFFSET_HOLE);
     return 0;
   }
-  else if (type == NBD_REPLY_TYPE_BLOCK_STATUS) {
+  else if (type == NBD_REPLY_TYPE_BLOCK_STATUS ||
+           type == NBD_REPLY_TYPE_BLOCK_STATUS_EXT) {
     if (cmd->type != NBD_CMD_BLOCK_STATUS) {
       SET_NEXT_STATE (%.DEAD);
       set_error (0, "invalid command for receiving block-status chunk, "
@@ -211,12 +214,19 @@  STATE_MACHINE {
                  cmd->type);
       return 0;
     }
-    /* XXX We should be able to skip the bad reply in these two cases. */
-    if (length < 12 || ((length-4) & 7) != 0) {
+    /* XXX We should be able to skip the bad reply in these cases. */
+    if (type == NBD_REPLY_TYPE_BLOCK_STATUS &&
+        (length < 12 || (length-4) % (2 * sizeof(uint32_t)))) {
       SET_NEXT_STATE (%.DEAD);
       set_error (0, "invalid length in NBD_REPLY_TYPE_BLOCK_STATUS");
       return 0;
     }
+    if (type == NBD_REPLY_TYPE_BLOCK_STATUS_EXT &&
+        (length < 20 || (length-4) % sizeof(struct nbd_block_descriptor_ext))) {
+      SET_NEXT_STATE (%.DEAD);
+      set_error (0, "invalid length in NBD_REPLY_TYPE_BLOCK_STATUS_EXT");
+      return 0;
+    }
     if (CALLBACK_IS_NULL (cmd->cb.fn.extent)) {
       SET_NEXT_STATE (%.DEAD);
       set_error (0, "not expecting NBD_REPLY_TYPE_BLOCK_STATUS here");
@@ -495,6 +505,7 @@  STATE_MACHINE {
   struct command *cmd = h->reply_cmd;
   uint32_t length;
   uint32_t count;
+  uint16_t type;

   switch (recv_into_rbuf (h)) {
   case -1: SET_NEXT_STATE (%.DEAD); return 0;
@@ -504,24 +515,33 @@  STATE_MACHINE {
     return 0;
   case 0:
     length = h->sbuf.sr.hdr.structured_reply.length; /* normalized in CHECK */
+    type = be16toh (h->sbuf.sr.hdr.structured_reply.type);

     assert (cmd); /* guaranteed by CHECK */
     assert (cmd->type == NBD_CMD_BLOCK_STATUS);
     assert (length >= 12);
     length -= sizeof h->bs_contextid;
-    count = length / (2 * sizeof (uint32_t));
+    if (type == NBD_REPLY_TYPE_BLOCK_STATUS)
+      count = length / (2 * sizeof (uint32_t));
+    else {
+      assert (type == NBD_REPLY_TYPE_BLOCK_STATUS_EXT);
+      /* XXX Insist on h->extended_headers? */
+      count = length / sizeof (struct nbd_block_descriptor_ext);
+    }

-    /* Read raw data into a subset of h->bs_entries, then expand it
+    /* Read raw data into an overlap of h->bs_entries, then move it
      * into place later during byte-swapping.
      */
     free (h->bs_entries.normal);
-    h->bs_entries.normal = malloc (count * sizeof *h->bs_entries.normal);
+    h->bs_entries.normal = malloc (MAX (count * sizeof *h->bs_entries.normal,
+                                        length));
     if (h->bs_entries.normal == NULL) {
       SET_NEXT_STATE (%.DEAD);
       set_error (errno, "malloc");
       return 0;
     }
-    h->rbuf = h->bs_entries.narrow;
+    h->rbuf = type == NBD_REPLY_TYPE_BLOCK_STATUS
+      ? h->bs_entries.narrow : (void *) h->bs_entries.wide;
     h->rlen = length;
     SET_NEXT_STATE (%RECV_BS_ENTRIES);
   }
@@ -533,7 +553,7 @@  STATE_MACHINE {
   uint32_t count;
   size_t i;
   uint32_t context_id;
-  uint32_t *raw;
+  uint16_t type;
   struct meta_context *meta_context;

   switch (recv_into_rbuf (h)) {
@@ -544,23 +564,46 @@  STATE_MACHINE {
     return 0;
   case 0:
     length = h->sbuf.sr.hdr.structured_reply.length; /* normalized in CHECK */
+    type = be16toh (h->sbuf.sr.hdr.structured_reply.type);

     assert (cmd); /* guaranteed by CHECK */
     assert (cmd->type == NBD_CMD_BLOCK_STATUS);
     assert (CALLBACK_IS_NOT_NULL (cmd->cb.fn.extent));
     assert (h->bs_entries.normal);
     assert (length >= 12);
-    count = (length - sizeof h->bs_contextid) / (2 * sizeof (uint32_t));
+    length -= sizeof h->bs_contextid;

     /* Need to byte-swap the entries returned, but apart from that we
-     * don't validate them.  Reverse order is essential, since we are
-     * expanding in-place from narrow to wider type.
+     * don't validate them.
      */
-    raw = h->bs_entries.narrow;
-    for (i = count; i > 0; ) {
-      --i;
-      h->bs_entries.normal[i].flags = be32toh (raw[i * 2 + 1]);
-      h->bs_entries.normal[i].length = be32toh (raw[i * 2]);
+    if (type == NBD_REPLY_TYPE_BLOCK_STATUS) {
+      uint32_t *raw = h->bs_entries.narrow;
+
+      /* Expanding in-place from narrow to wide, must use reverse order. */
+      count = length / (2 * sizeof (uint32_t));
+      for (i = count; i > 0; ) {
+        --i;
+        h->bs_entries.normal[i].flags = be32toh (raw[i * 2 + 1]);
+        h->bs_entries.normal[i].length = be32toh (raw[i * 2]);
+      }
+    }
+    else {
+      struct nbd_block_descriptor_ext *wide = h->bs_entries.wide;
+
+      /* ABI determines whether nbd_extent is 12 or 16 bytes, but the
+       * server sent us 16 bytes, so we must process in forward order.
+       */
+      assert (type == NBD_REPLY_TYPE_BLOCK_STATUS_EXT);
+      count = length / sizeof (struct nbd_block_descriptor_ext);
+      for (i = 0; i < count; i++) {
+        h->bs_entries.normal[i].length = be64toh (wide[i].length);
+        h->bs_entries.normal[i].flags = be32toh (wide[i].status_flags);
+        if (wide[i].pad) {
+          set_error (0, "server sent non-zero padding in block status");
+          SET_NEXT_STATE(%.DEAD);
+          return 0;
+        }
+      }
     }

     /* Look up the context ID. */