
[v2,2/4] vhost-user: Interface for migration state transfer

Message ID 20230712111703.28031-3-hreitz@redhat.com (mailing list archive)
State New, archived
Series vhost-user: Back-end state migration

Commit Message

Hanna Czenczek July 12, 2023, 11:17 a.m. UTC
Add the interface for transferring the back-end's state during migration
as defined previously in vhost-user.rst.

Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
---
 include/hw/virtio/vhost-backend.h |  24 +++++
 include/hw/virtio/vhost.h         |  79 ++++++++++++++++
 hw/virtio/vhost-user.c            | 147 ++++++++++++++++++++++++++++++
 hw/virtio/vhost.c                 |  37 ++++++++
 4 files changed, 287 insertions(+)

Comments

Stefan Hajnoczi July 18, 2023, 6:32 p.m. UTC | #1
On Wed, Jul 12, 2023 at 01:17:00PM +0200, Hanna Czenczek wrote:
> Add the interface for transferring the back-end's state during migration
> as defined previously in vhost-user.rst.
> 
> Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
> ---
>  include/hw/virtio/vhost-backend.h |  24 +++++
>  include/hw/virtio/vhost.h         |  79 ++++++++++++++++
>  hw/virtio/vhost-user.c            | 147 ++++++++++++++++++++++++++++++
>  hw/virtio/vhost.c                 |  37 ++++++++
>  4 files changed, 287 insertions(+)

Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Hao Xu July 20, 2023, 12:13 p.m. UTC | #2
On 7/12/23 19:17, Hanna Czenczek wrote:
> Add the interface for transferring the back-end's state during migration
> as defined previously in vhost-user.rst.
>
> Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
> ---
>   include/hw/virtio/vhost-backend.h |  24 +++++
>   include/hw/virtio/vhost.h         |  79 ++++++++++++++++
>   hw/virtio/vhost-user.c            | 147 ++++++++++++++++++++++++++++++
>   hw/virtio/vhost.c                 |  37 ++++++++
>   4 files changed, 287 insertions(+)
>
> diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
> index 31a251a9f5..e59d0b53f8 100644
> --- a/include/hw/virtio/vhost-backend.h
> +++ b/include/hw/virtio/vhost-backend.h
> @@ -26,6 +26,18 @@ typedef enum VhostSetConfigType {
>       VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
>   } VhostSetConfigType;
>   
> +typedef enum VhostDeviceStateDirection {
> +    /* Transfer state from back-end (device) to front-end */
> +    VHOST_TRANSFER_STATE_DIRECTION_SAVE = 0,
> +    /* Transfer state from front-end to back-end (device) */
> +    VHOST_TRANSFER_STATE_DIRECTION_LOAD = 1,
> +} VhostDeviceStateDirection;
> +
> +typedef enum VhostDeviceStatePhase {
> +    /* The device (and all its vrings) is stopped */
> +    VHOST_TRANSFER_STATE_PHASE_STOPPED = 0,
> +} VhostDeviceStatePhase;
> +
>   struct vhost_inflight;
>   struct vhost_dev;
>   struct vhost_log;
> @@ -133,6 +145,15 @@ typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev,
>   
>   typedef void (*vhost_reset_status_op)(struct vhost_dev *dev);
>   
> +typedef bool (*vhost_supports_migratory_state_op)(struct vhost_dev *dev);
> +typedef int (*vhost_set_device_state_fd_op)(struct vhost_dev *dev,
> +                                            VhostDeviceStateDirection direction,
> +                                            VhostDeviceStatePhase phase,
> +                                            int fd,
> +                                            int *reply_fd,
> +                                            Error **errp);
> +typedef int (*vhost_check_device_state_op)(struct vhost_dev *dev, Error **errp);
> +
>   typedef struct VhostOps {
>       VhostBackendType backend_type;
>       vhost_backend_init vhost_backend_init;
> @@ -181,6 +202,9 @@ typedef struct VhostOps {
>       vhost_force_iommu_op vhost_force_iommu;
>       vhost_set_config_call_op vhost_set_config_call;
>       vhost_reset_status_op vhost_reset_status;
> +    vhost_supports_migratory_state_op vhost_supports_migratory_state;
> +    vhost_set_device_state_fd_op vhost_set_device_state_fd;
> +    vhost_check_device_state_op vhost_check_device_state;
>   } VhostOps;
>   
>   int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> index 69bf59d630..d8877496e5 100644
> --- a/include/hw/virtio/vhost.h
> +++ b/include/hw/virtio/vhost.h
> @@ -346,4 +346,83 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
>   int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
>                              struct vhost_inflight *inflight);
>   bool vhost_dev_has_iommu(struct vhost_dev *dev);
> +
> +/**
> + * vhost_supports_migratory_state(): Checks whether the back-end
> + * supports transferring internal state for the purpose of migration.
> + * Support for this feature is required for vhost_set_device_state_fd()
> + * and vhost_check_device_state().
> + *
> + * @dev: The vhost device
> + *
> + * Returns true if the device supports these commands, and false if it
> + * does not.
> + */
> +bool vhost_supports_migratory_state(struct vhost_dev *dev);
> +
> +/**
> + * vhost_set_device_state_fd(): Begin transfer of internal state from/to
> + * the back-end for the purpose of migration.  Data is to be transferred
> + * over a pipe according to @direction and @phase.  The sending end must
> + * only write to the pipe, and the receiving end must only read from it.
> + * Once the sending end is done, it closes its FD.  The receiving end
> + * must take this as the end-of-transfer signal and close its FD, too.
> + *
> + * @fd is the back-end's end of the pipe: The write FD for SAVE, and the
> + * read FD for LOAD.  This function transfers ownership of @fd to the
> + * back-end, i.e. closes it in the front-end.
> + *
> + * The back-end may optionally reply with an FD of its own, if this
> + * improves efficiency on its end.  In this case, the returned FD is


Hi Hanna,

In what case/situation will the back-end have a more efficient FD?

My understanding is that this "FD of its own" is of the same type as the given
FD (e.g. both are pipes), so why would the FD from the back-end make a
difference? Am I missing anything here?


Regards,

Hao


> + * stored in *reply_fd.  The back-end will discard the FD sent to it,
> + * and the front-end must use *reply_fd for transferring state to/from
> + * the back-end.
> + *
> + * @dev: The vhost device
> + * @direction: The direction in which the state is to be transferred.
> + *             For outgoing migrations, this is SAVE, and data is read
> + *             from the back-end and stored by the front-end in the
> + *             migration stream.
> + *             For incoming migrations, this is LOAD, and data is read
> + *             by the front-end from the migration stream and sent to
> + *             the back-end to restore the saved state.
> + * @phase: Which migration phase we are in.  Currently, there is only
> + *         STOPPED (device and all vrings are stopped), in the future,
> + *         more phases such as PRE_COPY or POST_COPY may be added.
> + * @fd: Back-end's end of the pipe through which to transfer state; note
> + *      that ownership is transferred to the back-end, so this function
> + *      closes @fd in the front-end.
> + * @reply_fd: If the back-end wishes to use a different pipe for state
> + *            transfer, this will contain an FD for the front-end to
> + *            use.  Otherwise, -1 is stored here.
> + * @errp: Potential error description
> + *
> + * Returns 0 on success, and -errno on failure.
> + */
> +int vhost_set_device_state_fd(struct vhost_dev *dev,
> +                              VhostDeviceStateDirection direction,
> +                              VhostDeviceStatePhase phase,
> +                              int fd,
> +                              int *reply_fd,
> +                              Error **errp);
> +
> +/**
> + * vhost_check_device_state(): After transferring state from/to the
> + * back-end via vhost_set_device_state_fd(), i.e. once the sending end
> + * has closed the pipe, inquire the back-end to report any potential
> + * errors that have occurred on its side.  This allows sensing errors
> + * like:
> + * - During outgoing migration, when the source side had already started
> + *   to produce its state, something went wrong and it failed to finish
> + * - During incoming migration, when the received state is somehow
> + *   invalid and cannot be processed by the back-end
> + *
> + * @dev: The vhost device
> + * @errp: Potential error description
> + *
> + * Returns 0 when the back-end reports successful state transfer and
> + * processing, and -errno when an error occurred somewhere.
> + */
> +int vhost_check_device_state(struct vhost_dev *dev, Error **errp);
> +
>   #endif
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index 53a881ec2a..8e6b5485e8 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -76,6 +76,7 @@ enum VhostUserProtocolFeature {
>       VHOST_USER_PROTOCOL_F_STATUS = 16,
>       /* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
>       VHOST_USER_PROTOCOL_F_SUSPEND = 18,
> +    VHOST_USER_PROTOCOL_F_MIGRATORY_STATE = 19,
>       VHOST_USER_PROTOCOL_F_MAX
>   };
>   
> @@ -125,6 +126,8 @@ typedef enum VhostUserRequest {
>       VHOST_USER_GET_STATUS = 40,
>       VHOST_USER_SUSPEND = 41,
>       VHOST_USER_RESUME = 42,
> +    VHOST_USER_SET_DEVICE_STATE_FD = 43,
> +    VHOST_USER_CHECK_DEVICE_STATE = 44,
>       VHOST_USER_MAX
>   } VhostUserRequest;
>   
> @@ -216,6 +219,12 @@ typedef struct {
>       uint32_t size; /* the following payload size */
>   } QEMU_PACKED VhostUserHeader;
>   
> +/* Request payload of VHOST_USER_SET_DEVICE_STATE_FD */
> +typedef struct VhostUserTransferDeviceState {
> +    uint32_t direction;
> +    uint32_t phase;
> +} VhostUserTransferDeviceState;
> +
>   typedef union {
>   #define VHOST_USER_VRING_IDX_MASK   (0xff)
>   #define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
> @@ -230,6 +239,7 @@ typedef union {
>           VhostUserCryptoSession session;
>           VhostUserVringArea area;
>           VhostUserInflight inflight;
> +        VhostUserTransferDeviceState transfer_state;
>   } VhostUserPayload;
>   
>   typedef struct VhostUserMsg {
> @@ -2838,6 +2848,140 @@ static void vhost_user_reset_status(struct vhost_dev *dev)
>       }
>   }
>   
> +static bool vhost_user_supports_migratory_state(struct vhost_dev *dev)
> +{
> +    return virtio_has_feature(dev->protocol_features,
> +                              VHOST_USER_PROTOCOL_F_MIGRATORY_STATE);
> +}
> +
> +static int vhost_user_set_device_state_fd(struct vhost_dev *dev,
> +                                          VhostDeviceStateDirection direction,
> +                                          VhostDeviceStatePhase phase,
> +                                          int fd,
> +                                          int *reply_fd,
> +                                          Error **errp)
> +{
> +    int ret;
> +    struct vhost_user *vu = dev->opaque;
> +    VhostUserMsg msg = {
> +        .hdr = {
> +            .request = VHOST_USER_SET_DEVICE_STATE_FD,
> +            .flags = VHOST_USER_VERSION,
> +            .size = sizeof(msg.payload.transfer_state),
> +        },
> +        .payload.transfer_state = {
> +            .direction = direction,
> +            .phase = phase,
> +        },
> +    };
> +
> +    *reply_fd = -1;
> +
> +    if (!vhost_user_supports_migratory_state(dev)) {
> +        close(fd);
> +        error_setg(errp, "Back-end does not support migration state transfer");
> +        return -ENOTSUP;
> +    }
> +
> +    ret = vhost_user_write(dev, &msg, &fd, 1);
> +    close(fd);
> +    if (ret < 0) {
> +        error_setg_errno(errp, -ret,
> +                         "Failed to send SET_DEVICE_STATE_FD message");
> +        return ret;
> +    }
> +
> +    ret = vhost_user_read(dev, &msg);
> +    if (ret < 0) {
> +        error_setg_errno(errp, -ret,
> +                         "Failed to receive SET_DEVICE_STATE_FD reply");
> +        return ret;
> +    }
> +
> +    if (msg.hdr.request != VHOST_USER_SET_DEVICE_STATE_FD) {
> +        error_setg(errp,
> +                   "Received unexpected message type, expected %d, received %d",
> +                   VHOST_USER_SET_DEVICE_STATE_FD, msg.hdr.request);
> +        return -EPROTO;
> +    }
> +
> +    if (msg.hdr.size != sizeof(msg.payload.u64)) {
> +        error_setg(errp,
> +                   "Received bad message size, expected %zu, received %" PRIu32,
> +                   sizeof(msg.payload.u64), msg.hdr.size);
> +        return -EPROTO;
> +    }
> +
> +    if ((msg.payload.u64 & 0xff) != 0) {
> +        error_setg(errp, "Back-end did not accept migration state transfer");
> +        return -EIO;
> +    }
> +
> +    if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
> +        *reply_fd = qemu_chr_fe_get_msgfd(vu->user->chr);
> +        if (*reply_fd < 0) {
> +            error_setg(errp,
> +                       "Failed to get back-end-provided transfer pipe FD");
> +            *reply_fd = -1;
> +            return -EIO;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +static int vhost_user_check_device_state(struct vhost_dev *dev, Error **errp)
> +{
> +    int ret;
> +    VhostUserMsg msg = {
> +        .hdr = {
> +            .request = VHOST_USER_CHECK_DEVICE_STATE,
> +            .flags = VHOST_USER_VERSION,
> +            .size = 0,
> +        },
> +    };
> +
> +    if (!vhost_user_supports_migratory_state(dev)) {
> +        error_setg(errp, "Back-end does not support migration state transfer");
> +        return -ENOTSUP;
> +    }
> +
> +    ret = vhost_user_write(dev, &msg, NULL, 0);
> +    if (ret < 0) {
> +        error_setg_errno(errp, -ret,
> +                         "Failed to send CHECK_DEVICE_STATE message");
> +        return ret;
> +    }
> +
> +    ret = vhost_user_read(dev, &msg);
> +    if (ret < 0) {
> +        error_setg_errno(errp, -ret,
> +                         "Failed to receive CHECK_DEVICE_STATE reply");
> +        return ret;
> +    }
> +
> +    if (msg.hdr.request != VHOST_USER_CHECK_DEVICE_STATE) {
> +        error_setg(errp,
> +                   "Received unexpected message type, expected %d, received %d",
> +                   VHOST_USER_CHECK_DEVICE_STATE, msg.hdr.request);
> +        return -EPROTO;
> +    }
> +
> +    if (msg.hdr.size != sizeof(msg.payload.u64)) {
> +        error_setg(errp,
> +                   "Received bad message size, expected %zu, received %" PRIu32,
> +                   sizeof(msg.payload.u64), msg.hdr.size);
> +        return -EPROTO;
> +    }
> +
> +    if (msg.payload.u64 != 0) {
> +        error_setg(errp, "Back-end failed to process its internal state");
> +        return -EIO;
> +    }
> +
> +    return 0;
> +}
> +
>   const VhostOps user_ops = {
>           .backend_type = VHOST_BACKEND_TYPE_USER,
>           .vhost_backend_init = vhost_user_backend_init,
> @@ -2874,4 +3018,7 @@ const VhostOps user_ops = {
>           .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
>           .vhost_dev_start = vhost_user_dev_start,
>           .vhost_reset_status = vhost_user_reset_status,
> +        .vhost_supports_migratory_state = vhost_user_supports_migratory_state,
> +        .vhost_set_device_state_fd = vhost_user_set_device_state_fd,
> +        .vhost_check_device_state = vhost_user_check_device_state,
>   };
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 2e28e58da7..756b6d55a8 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -2091,3 +2091,40 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
>   
>       return -ENOSYS;
>   }
> +
> +bool vhost_supports_migratory_state(struct vhost_dev *dev)
> +{
> +    if (dev->vhost_ops->vhost_supports_migratory_state) {
> +        return dev->vhost_ops->vhost_supports_migratory_state(dev);
> +    }
> +
> +    return false;
> +}
> +
> +int vhost_set_device_state_fd(struct vhost_dev *dev,
> +                              VhostDeviceStateDirection direction,
> +                              VhostDeviceStatePhase phase,
> +                              int fd,
> +                              int *reply_fd,
> +                              Error **errp)
> +{
> +    if (dev->vhost_ops->vhost_set_device_state_fd) {
> +        return dev->vhost_ops->vhost_set_device_state_fd(dev, direction, phase,
> +                                                         fd, reply_fd, errp);
> +    }
> +
> +    error_setg(errp,
> +               "vhost transport does not support migration state transfer");
> +    return -ENOSYS;
> +}
> +
> +int vhost_check_device_state(struct vhost_dev *dev, Error **errp)
> +{
> +    if (dev->vhost_ops->vhost_check_device_state) {
> +        return dev->vhost_ops->vhost_check_device_state(dev, errp);
> +    }
> +
> +    error_setg(errp,
> +               "vhost transport does not support migration state transfer");
> +    return -ENOSYS;
> +}
Hanna Czenczek July 20, 2023, 1:20 p.m. UTC | #3
On 20.07.23 14:13, Hao Xu wrote:
>
> On 7/12/23 19:17, Hanna Czenczek wrote:
>> Add the interface for transferring the back-end's state during migration
>> as defined previously in vhost-user.rst.
>>
>> Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
>> ---
>>   include/hw/virtio/vhost-backend.h |  24 +++++
>>   include/hw/virtio/vhost.h         |  79 ++++++++++++++++
>>   hw/virtio/vhost-user.c            | 147 ++++++++++++++++++++++++++++++
>>   hw/virtio/vhost.c                 |  37 ++++++++
>>   4 files changed, 287 insertions(+)
>>
>> diff --git a/include/hw/virtio/vhost-backend.h 
>> b/include/hw/virtio/vhost-backend.h
>> index 31a251a9f5..e59d0b53f8 100644
>> --- a/include/hw/virtio/vhost-backend.h
>> +++ b/include/hw/virtio/vhost-backend.h
>> @@ -26,6 +26,18 @@ typedef enum VhostSetConfigType {
>>       VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
>>   } VhostSetConfigType;
>>   +typedef enum VhostDeviceStateDirection {
>> +    /* Transfer state from back-end (device) to front-end */
>> +    VHOST_TRANSFER_STATE_DIRECTION_SAVE = 0,
>> +    /* Transfer state from front-end to back-end (device) */
>> +    VHOST_TRANSFER_STATE_DIRECTION_LOAD = 1,
>> +} VhostDeviceStateDirection;
>> +
>> +typedef enum VhostDeviceStatePhase {
>> +    /* The device (and all its vrings) is stopped */
>> +    VHOST_TRANSFER_STATE_PHASE_STOPPED = 0,
>> +} VhostDeviceStatePhase;
>> +
>>   struct vhost_inflight;
>>   struct vhost_dev;
>>   struct vhost_log;
>> @@ -133,6 +145,15 @@ typedef int (*vhost_set_config_call_op)(struct 
>> vhost_dev *dev,
>>     typedef void (*vhost_reset_status_op)(struct vhost_dev *dev);
>>   +typedef bool (*vhost_supports_migratory_state_op)(struct vhost_dev 
>> *dev);
>> +typedef int (*vhost_set_device_state_fd_op)(struct vhost_dev *dev,
>> + VhostDeviceStateDirection direction,
>> + VhostDeviceStatePhase phase,
>> +                                            int fd,
>> +                                            int *reply_fd,
>> +                                            Error **errp);
>> +typedef int (*vhost_check_device_state_op)(struct vhost_dev *dev, 
>> Error **errp);
>> +
>>   typedef struct VhostOps {
>>       VhostBackendType backend_type;
>>       vhost_backend_init vhost_backend_init;
>> @@ -181,6 +202,9 @@ typedef struct VhostOps {
>>       vhost_force_iommu_op vhost_force_iommu;
>>       vhost_set_config_call_op vhost_set_config_call;
>>       vhost_reset_status_op vhost_reset_status;
>> +    vhost_supports_migratory_state_op vhost_supports_migratory_state;
>> +    vhost_set_device_state_fd_op vhost_set_device_state_fd;
>> +    vhost_check_device_state_op vhost_check_device_state;
>>   } VhostOps;
>>     int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
>> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
>> index 69bf59d630..d8877496e5 100644
>> --- a/include/hw/virtio/vhost.h
>> +++ b/include/hw/virtio/vhost.h
>> @@ -346,4 +346,83 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
>>   int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
>>                              struct vhost_inflight *inflight);
>>   bool vhost_dev_has_iommu(struct vhost_dev *dev);
>> +
>> +/**
>> + * vhost_supports_migratory_state(): Checks whether the back-end
>> + * supports transferring internal state for the purpose of migration.
>> + * Support for this feature is required for vhost_set_device_state_fd()
>> + * and vhost_check_device_state().
>> + *
>> + * @dev: The vhost device
>> + *
>> + * Returns true if the device supports these commands, and false if it
>> + * does not.
>> + */
>> +bool vhost_supports_migratory_state(struct vhost_dev *dev);
>> +
>> +/**
>> + * vhost_set_device_state_fd(): Begin transfer of internal state 
>> from/to
>> + * the back-end for the purpose of migration.  Data is to be 
>> transferred
>> + * over a pipe according to @direction and @phase.  The sending end 
>> must
>> + * only write to the pipe, and the receiving end must only read from 
>> it.
>> + * Once the sending end is done, it closes its FD.  The receiving end
>> + * must take this as the end-of-transfer signal and close its FD, too.
>> + *
>> + * @fd is the back-end's end of the pipe: The write FD for SAVE, and 
>> the
>> + * read FD for LOAD.  This function transfers ownership of @fd to the
>> + * back-end, i.e. closes it in the front-end.
>> + *
>> + * The back-end may optionally reply with an FD of its own, if this
>> + * improves efficiency on its end.  In this case, the returned FD is
>
>
> Hi Hanna,
>
> In what case/situation, the back-end will have a more efficient fd?

Hi Hao,

There is no example yet.

> Here my understanding of this "FD of its own" is as same type as
>
> the given fd(e.g. both pipe files), why the fd from back-end makes
>
> difference? Do I miss anything here?

Maybe it makes more sense in the context of how we came up with the 
idea: Specifically, Stefan and I were asking which end should provide 
the FD.  In the context of vhost-user, it makes sense to have it be the 
front-end, because it controls vhost-user communication, but that’s just 
the natural protocol choice, not necessarily the most efficient one.

It is imaginable that the front-end (e.g. qemu) could create a file 
descriptor whose data is directly spliced (automatically) into the 
migration stream, and then hand this FD to the back-end.  In practice, 
this doesn’t work for qemu (at this point), because it doesn’t use 
simple read/write into the migration stream, but has an abstraction 
layer for that.  It might be possible to make this work in some cases, 
depending on what is used as a transport, but (1) not generally, and (2) 
not now.  But this would be efficient.

The model we’d implement in qemu with this series is comparatively not 
efficient, because it manually copies data from the FD (which by default 
is a pipe) into the migration stream.

But it is possible that the front-end can provide a zero-copy FD into 
the migration stream, and for that reason we want to allow the front-end 
to provide the transfer FD.

In contrast, though, the back-end might have a more efficient implementation 
on its side.  It is difficult to imagine, but it may be possible that it 
already has an FD that the data needs to be written to/read from, e.g. 
because it’s connected to a physical device that 
wants to get/send its state this way.  Admittedly, we have absolutely no 
concrete example for such a back-end at this point, but it’s hard to 
rule out that it is possible that there will be back-ends that could 
make use of zero-copy if only they are allowed to dictate the transfer FD.

Because we in qemu can’t (at least not generally) provide an efficient 
(zero-copy) implementation, we don’t want to rule out that the back-end 
might be able to, so we also want to allow it to provide the 
transfer FD.

In the end, we decided that we don’t want to preclude either side from 
providing the FD.  If one side knows it can do better than a plain pipe 
with copying on both ends, it should provide the FD.  That doesn’t 
complicate the implementation much.

(So, notably, we measure “improves efficiency” based on “is it better 
than a plain pipe with copying on both ends”.  A pipe with copying is 
the default implementation, but as Stefan has pointed out in his review, 
it doesn’t need to be a pipe.  More efficient FDs, like the back-end can 
provide in its reply, would actually likely not be pipes.)
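
To make the default model a bit more concrete, here is a minimal
front-end-side sketch of the SAVE direction with a plain pipe.  This is not
the code from this series, only an illustration of the flow described above:
put_chunk() is a hypothetical callback standing in for writing into the
migration stream, and the usual includes (<unistd.h>, <errno.h>,
"hw/virtio/vhost.h", the QAPI error helpers) are assumed.

static int sketch_save_backend_state(struct vhost_dev *dev,
                                     int (*put_chunk)(const uint8_t *buf,
                                                      size_t len,
                                                      void *opaque),
                                     void *opaque, Error **errp)
{
    int pipe_fds[2];
    int read_fd, reply_fd = -1;
    uint8_t buf[4096];
    ssize_t len;
    int ret;

    if (pipe(pipe_fds) < 0) {
        ret = -errno;
        error_setg_errno(errp, errno, "Failed to create transfer pipe");
        return ret;
    }
    read_fd = pipe_fds[0];

    /* Hand the write end to the back-end; ownership of that FD moves there */
    ret = vhost_set_device_state_fd(dev, VHOST_TRANSFER_STATE_DIRECTION_SAVE,
                                    VHOST_TRANSFER_STATE_PHASE_STOPPED,
                                    pipe_fds[1], &reply_fd, errp);
    if (ret < 0) {
        close(read_fd);
        return ret;
    }
    if (reply_fd >= 0) {
        /* The back-end provided its own FD; read from that one instead */
        close(read_fd);
        read_fd = reply_fd;
    }

    /* Copy everything the back-end writes until it closes its end (EOF) */
    while ((len = read(read_fd, buf, sizeof(buf))) > 0) {
        if (put_chunk(buf, len, opaque) < 0) {
            error_setg(errp, "Failed to store back-end state");
            close(read_fd);
            return -EIO;
        }
    }
    ret = len < 0 ? -errno : 0;
    close(read_fd);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to read back-end state");
        return ret;
    }

    /* Finally, ask the back-end whether it produced its state successfully */
    return vhost_check_device_state(dev, errp);
}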

Hanna
Hao Xu July 20, 2023, 3:05 p.m. UTC | #4
On 7/20/23 21:20, Hanna Czenczek wrote:
> On 20.07.23 14:13, Hao Xu wrote:
>>
>> On 7/12/23 19:17, Hanna Czenczek wrote:
>>> Add the interface for transferring the back-end's state during 
>>> migration
>>> as defined previously in vhost-user.rst.
>>>
>>> Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
>>> ---
>>>   include/hw/virtio/vhost-backend.h |  24 +++++
>>>   include/hw/virtio/vhost.h         |  79 ++++++++++++++++
>>>   hw/virtio/vhost-user.c            | 147 
>>> ++++++++++++++++++++++++++++++
>>>   hw/virtio/vhost.c                 |  37 ++++++++
>>>   4 files changed, 287 insertions(+)
>>>
>>> diff --git a/include/hw/virtio/vhost-backend.h 
>>> b/include/hw/virtio/vhost-backend.h
>>> index 31a251a9f5..e59d0b53f8 100644
>>> --- a/include/hw/virtio/vhost-backend.h
>>> +++ b/include/hw/virtio/vhost-backend.h
>>> @@ -26,6 +26,18 @@ typedef enum VhostSetConfigType {
>>>       VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
>>>   } VhostSetConfigType;
>>>   +typedef enum VhostDeviceStateDirection {
>>> +    /* Transfer state from back-end (device) to front-end */
>>> +    VHOST_TRANSFER_STATE_DIRECTION_SAVE = 0,
>>> +    /* Transfer state from front-end to back-end (device) */
>>> +    VHOST_TRANSFER_STATE_DIRECTION_LOAD = 1,
>>> +} VhostDeviceStateDirection;
>>> +
>>> +typedef enum VhostDeviceStatePhase {
>>> +    /* The device (and all its vrings) is stopped */
>>> +    VHOST_TRANSFER_STATE_PHASE_STOPPED = 0,
>>> +} VhostDeviceStatePhase;
>>> +
>>>   struct vhost_inflight;
>>>   struct vhost_dev;
>>>   struct vhost_log;
>>> @@ -133,6 +145,15 @@ typedef int (*vhost_set_config_call_op)(struct 
>>> vhost_dev *dev,
>>>     typedef void (*vhost_reset_status_op)(struct vhost_dev *dev);
>>>   +typedef bool (*vhost_supports_migratory_state_op)(struct 
>>> vhost_dev *dev);
>>> +typedef int (*vhost_set_device_state_fd_op)(struct vhost_dev *dev,
>>> + VhostDeviceStateDirection direction,
>>> + VhostDeviceStatePhase phase,
>>> +                                            int fd,
>>> +                                            int *reply_fd,
>>> +                                            Error **errp);
>>> +typedef int (*vhost_check_device_state_op)(struct vhost_dev *dev, 
>>> Error **errp);
>>> +
>>>   typedef struct VhostOps {
>>>       VhostBackendType backend_type;
>>>       vhost_backend_init vhost_backend_init;
>>> @@ -181,6 +202,9 @@ typedef struct VhostOps {
>>>       vhost_force_iommu_op vhost_force_iommu;
>>>       vhost_set_config_call_op vhost_set_config_call;
>>>       vhost_reset_status_op vhost_reset_status;
>>> +    vhost_supports_migratory_state_op vhost_supports_migratory_state;
>>> +    vhost_set_device_state_fd_op vhost_set_device_state_fd;
>>> +    vhost_check_device_state_op vhost_check_device_state;
>>>   } VhostOps;
>>>     int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
>>> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
>>> index 69bf59d630..d8877496e5 100644
>>> --- a/include/hw/virtio/vhost.h
>>> +++ b/include/hw/virtio/vhost.h
>>> @@ -346,4 +346,83 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
>>>   int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t 
>>> queue_size,
>>>                              struct vhost_inflight *inflight);
>>>   bool vhost_dev_has_iommu(struct vhost_dev *dev);
>>> +
>>> +/**
>>> + * vhost_supports_migratory_state(): Checks whether the back-end
>>> + * supports transferring internal state for the purpose of migration.
>>> + * Support for this feature is required for 
>>> vhost_set_device_state_fd()
>>> + * and vhost_check_device_state().
>>> + *
>>> + * @dev: The vhost device
>>> + *
>>> + * Returns true if the device supports these commands, and false if it
>>> + * does not.
>>> + */
>>> +bool vhost_supports_migratory_state(struct vhost_dev *dev);
>>> +
>>> +/**
>>> + * vhost_set_device_state_fd(): Begin transfer of internal state 
>>> from/to
>>> + * the back-end for the purpose of migration.  Data is to be 
>>> transferred
>>> + * over a pipe according to @direction and @phase.  The sending end 
>>> must
>>> + * only write to the pipe, and the receiving end must only read 
>>> from it.
>>> + * Once the sending end is done, it closes its FD.  The receiving end
>>> + * must take this as the end-of-transfer signal and close its FD, too.
>>> + *
>>> + * @fd is the back-end's end of the pipe: The write FD for SAVE, 
>>> and the
>>> + * read FD for LOAD.  This function transfers ownership of @fd to the
>>> + * back-end, i.e. closes it in the front-end.
>>> + *
>>> + * The back-end may optionally reply with an FD of its own, if this
>>> + * improves efficiency on its end.  In this case, the returned FD is
>>
>>
>> Hi Hanna,
>>
>> In what case/situation, the back-end will have a more efficient fd?
>
> Hi Hao,
>
> There is no example yet.
>
>> Here my understanding of this "FD of its own" is as same type as
>>
>> the given fd(e.g. both pipe files), why the fd from back-end makes
>>
>> difference? Do I miss anything here?
>
> Maybe it makes more sense in the context of how we came up with the 
> idea: Specifically, Stefan and me were asking which end should provide 
> the FD.  In the context of vhost-user, it makes sense to have it be 
> the front-end, because it controls vhost-user communication, but 
> that’s just the natural protocol choice, not necessarily the most 
> efficient one.
>
> It is imaginable that the front-end (e.g. qemu) could create a file 
> descriptor whose data is directly spliced (automatically) into the 
> migration stream, and then hand this FD to the back-end. In practice, 
> this doesn’t work for qemu (at this point), because it doesn’t use 
> simple read/write into the migration stream, but has an abstraction 
> layer for that.  It might be possible to make this work in some cases, 
> depending on what is used as a transport, but (1) not generally, and 
> (2) not now.  But this would be efficient.


I'm thinking about one thing: we already have a channel (a Unix domain 
socket) between the front-end and the back-end, so why not deliver the state 
file (as an FD) to/from the back-end directly, rather than negotiating a new 
pipe as the data channel?  (Since we already assume they can share the FD of 
a pipe, why not the FD of a normal file?)



>
> The model we’d implement in qemu with this series is comparatively not 
> efficient, because it manually copies data from the FD (which by 
> default is a pipe) into the migration stream.
>
> But it is possible that the front-end can provide a zero-copy FD into 
> the migration stream, and for that reason we want to allow the 
> front-end to provide the transfer FD.
>
> In contrast, the back-end might have a more efficient implementation 
> on its side, too, though.  It is difficult to imagine, but it may be 
> possible that it has an FD already where the data needs to written 
> to/read from, e.g. because it’s connected to a physical device that 
> wants to get/send its state this way.  Admittedly, we have absolutely 
> no concrete example for such a back-end at this point, but it’s hard 
> to rule out that it is possible that there will be back-ends that 
> could make use of zero-copy if only they are allowed to dictate the 
> transfer FD.
>
> So because we in qemu can’t (at least not generally) provide an 
> efficient (zero-copy) implementation, we don’t want to rule out that 
> the back-end might be able to, so we also want to allow it to provide 
> the transfer FD.
>
> In the end, we decided that we don’t want to preclude either side of 
> providing the FD.  If one side knows it can do better than a plain 
> pipe with copying on both ends, it should provide the FD. That doesn’t 
> complicate the implementation much.
>
> (So, notably, we measure “improves efficiency” based on “is it better 
> than a plain pipe with copying on both ends”.  A pipe with copying is 
> the default implementation, but as Stefan has pointed out in his 
> review, it doesn’t need to be a pipe.  More efficient FDs, like the 
> back-end can provide in its reply, would actually likely not be pipes.)


Yeah, but the vhost-user protocol in this patch set defines these FDs as a 
data transfer channel, not the data itself; what about the latter?




>
> Hanna
>
Hanna Czenczek July 21, 2023, 8:07 a.m. UTC | #5
On 20.07.23 17:05, Hao Xu wrote:
>
> On 7/20/23 21:20, Hanna Czenczek wrote:
>> On 20.07.23 14:13, Hao Xu wrote:
>>>
>>> On 7/12/23 19:17, Hanna Czenczek wrote:
>>>> Add the interface for transferring the back-end's state during 
>>>> migration
>>>> as defined previously in vhost-user.rst.
>>>>
>>>> Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
>>>> ---
>>>>   include/hw/virtio/vhost-backend.h |  24 +++++
>>>>   include/hw/virtio/vhost.h         |  79 ++++++++++++++++
>>>>   hw/virtio/vhost-user.c            | 147 
>>>> ++++++++++++++++++++++++++++++
>>>>   hw/virtio/vhost.c                 |  37 ++++++++
>>>>   4 files changed, 287 insertions(+)
>>>>
>>>> diff --git a/include/hw/virtio/vhost-backend.h 
>>>> b/include/hw/virtio/vhost-backend.h
>>>> index 31a251a9f5..e59d0b53f8 100644
>>>> --- a/include/hw/virtio/vhost-backend.h
>>>> +++ b/include/hw/virtio/vhost-backend.h
>>>> @@ -26,6 +26,18 @@ typedef enum VhostSetConfigType {
>>>>       VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
>>>>   } VhostSetConfigType;
>>>>   +typedef enum VhostDeviceStateDirection {
>>>> +    /* Transfer state from back-end (device) to front-end */
>>>> +    VHOST_TRANSFER_STATE_DIRECTION_SAVE = 0,
>>>> +    /* Transfer state from front-end to back-end (device) */
>>>> +    VHOST_TRANSFER_STATE_DIRECTION_LOAD = 1,
>>>> +} VhostDeviceStateDirection;
>>>> +
>>>> +typedef enum VhostDeviceStatePhase {
>>>> +    /* The device (and all its vrings) is stopped */
>>>> +    VHOST_TRANSFER_STATE_PHASE_STOPPED = 0,
>>>> +} VhostDeviceStatePhase;
>>>> +
>>>>   struct vhost_inflight;
>>>>   struct vhost_dev;
>>>>   struct vhost_log;
>>>> @@ -133,6 +145,15 @@ typedef int (*vhost_set_config_call_op)(struct 
>>>> vhost_dev *dev,
>>>>     typedef void (*vhost_reset_status_op)(struct vhost_dev *dev);
>>>>   +typedef bool (*vhost_supports_migratory_state_op)(struct 
>>>> vhost_dev *dev);
>>>> +typedef int (*vhost_set_device_state_fd_op)(struct vhost_dev *dev,
>>>> + VhostDeviceStateDirection direction,
>>>> + VhostDeviceStatePhase phase,
>>>> +                                            int fd,
>>>> +                                            int *reply_fd,
>>>> +                                            Error **errp);
>>>> +typedef int (*vhost_check_device_state_op)(struct vhost_dev *dev, 
>>>> Error **errp);
>>>> +
>>>>   typedef struct VhostOps {
>>>>       VhostBackendType backend_type;
>>>>       vhost_backend_init vhost_backend_init;
>>>> @@ -181,6 +202,9 @@ typedef struct VhostOps {
>>>>       vhost_force_iommu_op vhost_force_iommu;
>>>>       vhost_set_config_call_op vhost_set_config_call;
>>>>       vhost_reset_status_op vhost_reset_status;
>>>> +    vhost_supports_migratory_state_op vhost_supports_migratory_state;
>>>> +    vhost_set_device_state_fd_op vhost_set_device_state_fd;
>>>> +    vhost_check_device_state_op vhost_check_device_state;
>>>>   } VhostOps;
>>>>     int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
>>>> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
>>>> index 69bf59d630..d8877496e5 100644
>>>> --- a/include/hw/virtio/vhost.h
>>>> +++ b/include/hw/virtio/vhost.h
>>>> @@ -346,4 +346,83 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
>>>>   int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t 
>>>> queue_size,
>>>>                              struct vhost_inflight *inflight);
>>>>   bool vhost_dev_has_iommu(struct vhost_dev *dev);
>>>> +
>>>> +/**
>>>> + * vhost_supports_migratory_state(): Checks whether the back-end
>>>> + * supports transferring internal state for the purpose of migration.
>>>> + * Support for this feature is required for 
>>>> vhost_set_device_state_fd()
>>>> + * and vhost_check_device_state().
>>>> + *
>>>> + * @dev: The vhost device
>>>> + *
>>>> + * Returns true if the device supports these commands, and false 
>>>> if it
>>>> + * does not.
>>>> + */
>>>> +bool vhost_supports_migratory_state(struct vhost_dev *dev);
>>>> +
>>>> +/**
>>>> + * vhost_set_device_state_fd(): Begin transfer of internal state 
>>>> from/to
>>>> + * the back-end for the purpose of migration.  Data is to be 
>>>> transferred
>>>> + * over a pipe according to @direction and @phase.  The sending 
>>>> end must
>>>> + * only write to the pipe, and the receiving end must only read 
>>>> from it.
>>>> + * Once the sending end is done, it closes its FD.  The receiving end
>>>> + * must take this as the end-of-transfer signal and close its FD, 
>>>> too.
>>>> + *
>>>> + * @fd is the back-end's end of the pipe: The write FD for SAVE, 
>>>> and the
>>>> + * read FD for LOAD.  This function transfers ownership of @fd to the
>>>> + * back-end, i.e. closes it in the front-end.
>>>> + *
>>>> + * The back-end may optionally reply with an FD of its own, if this
>>>> + * improves efficiency on its end.  In this case, the returned FD is
>>>
>>>
>>> Hi Hanna,
>>>
>>> In what case/situation, the back-end will have a more efficient fd?
>>
>> Hi Hao,
>>
>> There is no example yet.
>>
>>> Here my understanding of this "FD of its own" is as same type as
>>>
>>> the given fd(e.g. both pipe files), why the fd from back-end makes
>>>
>>> difference? Do I miss anything here?
>>
>> Maybe it makes more sense in the context of how we came up with the 
>> idea: Specifically, Stefan and me were asking which end should 
>> provide the FD.  In the context of vhost-user, it makes sense to have 
>> it be the front-end, because it controls vhost-user communication, 
>> but that’s just the natural protocol choice, not necessarily the most 
>> efficient one.
>>
>> It is imaginable that the front-end (e.g. qemu) could create a file 
>> descriptor whose data is directly spliced (automatically) into the 
>> migration stream, and then hand this FD to the back-end. In practice, 
>> this doesn’t work for qemu (at this point), because it doesn’t use 
>> simple read/write into the migration stream, but has an abstraction 
>> layer for that.  It might be possible to make this work in some 
>> cases, depending on what is used as a transport, but (1) not 
>> generally, and (2) not now.  But this would be efficient.
>
>
> I'm thinking one thing, we now already have a channel(a unix domain 
> socket) between front-end and back-end, why not delivering the state 
> file (as an fd) to/from back-end directly rathen than negotiating
>
> a new pipe as data channel.(since we already assume they can share fd 
> of pipe, why not fd of normal file?)

I don’t quite understand.  We do deliver the FD to the back-end through this 
new command, rather directly.  The back-end can disagree and send another FD 
back, but it doesn’t have to.  It can simply always take the FD that it’s 
been given.

The FD also doesn’t need to be a pipe, it can be any FD that the 
back-end can read the state from (see my discussion with Stefan on patch 
1).  In the practical implementation, it’s difficult for qemu to provide 
anything that is not a pipe, because the migration state can be sent 
over many channels, and the respective FD (if any) is not exposed on the 
high level (for devices).
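
For illustration only, this is roughly the choice the back-end side makes
when it receives VHOST_USER_SET_DEVICE_STATE_FD, written against the reply
layout that the front-end code in this patch checks (low byte zero means
“accepted”, VHOST_USER_VRING_NOFD_MASK set means no replacement FD is
attached).  How the u64 reply and the ancillary FD are actually sent back
depends on the back-end framework and is not shown here.

/* Value taken from the VhostUserMsg definitions on the front-end side */
#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)

static uint64_t sketch_reply_to_set_device_state_fd(int received_fd,
                                                    int own_fd, /* -1: none */
                                                    int *fd_to_attach)
{
    if (own_fd >= 0) {
        /* We have a better FD: drop the received one and reply with ours */
        close(received_fd);
        *fd_to_attach = own_fd;
        return 0;                          /* accepted, reply FD attached */
    }

    /* Otherwise keep the FD the front-end handed over and use it later */
    *fd_to_attach = -1;
    return VHOST_USER_VRING_NOFD_MASK;     /* accepted, no reply FD */
}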

>
>>
>> The model we’d implement in qemu with this series is comparatively 
>> not efficient, because it manually copies data from the FD (which by 
>> default is a pipe) into the migration stream.
>>
>> But it is possible that the front-end can provide a zero-copy FD into 
>> the migration stream, and for that reason we want to allow the 
>> front-end to provide the transfer FD.
>>
>> In contrast, the back-end might have a more efficient implementation 
>> on its side, too, though.  It is difficult to imagine, but it may be 
>> possible that it has an FD already where the data needs to written 
>> to/read from, e.g. because it’s connected to a physical device that 
>> wants to get/send its state this way.  Admittedly, we have absolutely 
>> no concrete example for such a back-end at this point, but it’s hard 
>> to rule out that it is possible that there will be back-ends that 
>> could make use of zero-copy if only they are allowed to dictate the 
>> transfer FD.
>>
>> So because we in qemu can’t (at least not generally) provide an 
>> efficient (zero-copy) implementation, we don’t want to rule out that 
>> the back-end might be able to, so we also want to allow it to provide 
>> the transfer FD.
>>
>> In the end, we decided that we don’t want to preclude either side of 
>> providing the FD.  If one side knows it can do better than a plain 
>> pipe with copying on both ends, it should provide the FD. That 
>> doesn’t complicate the implementation much.
>>
>> (So, notably, we measure “improves efficiency” based on “is it better 
>> than a plain pipe with copying on both ends”.  A pipe with copying is 
>> the default implementation, but as Stefan has pointed out in his 
>> review, it doesn’t need to be a pipe.  More efficient FDs, like the 
>> back-end can provide in its reply, would actually likely not be pipes.)
>
>
> Yea, but the vhost-user protocol in this patchset defines these FDs as 
> data transfer channel not the data itself, how about the latter?

I don’t quite understand this either.  Perhaps you’re suggesting that we 
should just transfer the data over the vhost-user socket. There are two 
reasons why I don’t think we should do that: First, the vhost-user 
socket is currently only used for a small amount of data.  For example, 
the Rust vhost crate will reject any message longer than 4096 bytes.  So 
if I were to use it for more data, I feel like I’d be abusing it.  
Second, if/when we add other transfer phases (i.e. while the VM is still 
running, to transfer larger amounts of state), it will be important that 
the vhost-user socket is open for messages from the front-end at any 
time.  So then, at the latest, we would need a separate data channel.  I 
don’t feel like there is harm in making the interface have this separate 
channel now already.

Or maybe you’re suggesting that the separate FD is OK, but that it shouldn’t 
be defined as a data transfer channel, just as data, i.e. not necessarily a 
pipe.  I think that’s basically the discussion I had with 
Stefan on patch 1: That the important thing is just that the receiving 
end can read data out of the FD until EOF, and the writing end can write 
data and close the FD to signal the end of data. Whether they actually 
do that is not important, only that anything they do will let the other 
end do that.

Hanna
Hao Xu July 21, 2023, 8:23 a.m. UTC | #6
On 7/21/23 16:07, Hanna Czenczek wrote:
> On 20.07.23 17:05, Hao Xu wrote:
>>
>> On 7/20/23 21:20, Hanna Czenczek wrote:
>>> On 20.07.23 14:13, Hao Xu wrote:
>>>>
>>>> On 7/12/23 19:17, Hanna Czenczek wrote:
>>>>> Add the interface for transferring the back-end's state during 
>>>>> migration
>>>>> as defined previously in vhost-user.rst.
>>>>>
>>>>> Signed-off-by: Hanna Czenczek <hreitz@redhat.com>
>>>>> ---
>>>>>   include/hw/virtio/vhost-backend.h |  24 +++++
>>>>>   include/hw/virtio/vhost.h         |  79 ++++++++++++++++
>>>>>   hw/virtio/vhost-user.c            | 147 
>>>>> ++++++++++++++++++++++++++++++
>>>>>   hw/virtio/vhost.c                 |  37 ++++++++
>>>>>   4 files changed, 287 insertions(+)
>>>>>
>>>>> diff --git a/include/hw/virtio/vhost-backend.h 
>>>>> b/include/hw/virtio/vhost-backend.h
>>>>> index 31a251a9f5..e59d0b53f8 100644
>>>>> --- a/include/hw/virtio/vhost-backend.h
>>>>> +++ b/include/hw/virtio/vhost-backend.h
>>>>> @@ -26,6 +26,18 @@ typedef enum VhostSetConfigType {
>>>>>       VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
>>>>>   } VhostSetConfigType;
>>>>>   +typedef enum VhostDeviceStateDirection {
>>>>> +    /* Transfer state from back-end (device) to front-end */
>>>>> +    VHOST_TRANSFER_STATE_DIRECTION_SAVE = 0,
>>>>> +    /* Transfer state from front-end to back-end (device) */
>>>>> +    VHOST_TRANSFER_STATE_DIRECTION_LOAD = 1,
>>>>> +} VhostDeviceStateDirection;
>>>>> +
>>>>> +typedef enum VhostDeviceStatePhase {
>>>>> +    /* The device (and all its vrings) is stopped */
>>>>> +    VHOST_TRANSFER_STATE_PHASE_STOPPED = 0,
>>>>> +} VhostDeviceStatePhase;
>>>>> +
>>>>>   struct vhost_inflight;
>>>>>   struct vhost_dev;
>>>>>   struct vhost_log;
>>>>> @@ -133,6 +145,15 @@ typedef int 
>>>>> (*vhost_set_config_call_op)(struct vhost_dev *dev,
>>>>>     typedef void (*vhost_reset_status_op)(struct vhost_dev *dev);
>>>>>   +typedef bool (*vhost_supports_migratory_state_op)(struct 
>>>>> vhost_dev *dev);
>>>>> +typedef int (*vhost_set_device_state_fd_op)(struct vhost_dev *dev,
>>>>> + VhostDeviceStateDirection direction,
>>>>> + VhostDeviceStatePhase phase,
>>>>> +                                            int fd,
>>>>> +                                            int *reply_fd,
>>>>> +                                            Error **errp);
>>>>> +typedef int (*vhost_check_device_state_op)(struct vhost_dev *dev, 
>>>>> Error **errp);
>>>>> +
>>>>>   typedef struct VhostOps {
>>>>>       VhostBackendType backend_type;
>>>>>       vhost_backend_init vhost_backend_init;
>>>>> @@ -181,6 +202,9 @@ typedef struct VhostOps {
>>>>>       vhost_force_iommu_op vhost_force_iommu;
>>>>>       vhost_set_config_call_op vhost_set_config_call;
>>>>>       vhost_reset_status_op vhost_reset_status;
>>>>> +    vhost_supports_migratory_state_op 
>>>>> vhost_supports_migratory_state;
>>>>> +    vhost_set_device_state_fd_op vhost_set_device_state_fd;
>>>>> +    vhost_check_device_state_op vhost_check_device_state;
>>>>>   } VhostOps;
>>>>>     int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
>>>>> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
>>>>> index 69bf59d630..d8877496e5 100644
>>>>> --- a/include/hw/virtio/vhost.h
>>>>> +++ b/include/hw/virtio/vhost.h
>>>>> @@ -346,4 +346,83 @@ int vhost_dev_set_inflight(struct vhost_dev 
>>>>> *dev,
>>>>>   int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t 
>>>>> queue_size,
>>>>>                              struct vhost_inflight *inflight);
>>>>>   bool vhost_dev_has_iommu(struct vhost_dev *dev);
>>>>> +
>>>>> +/**
>>>>> + * vhost_supports_migratory_state(): Checks whether the back-end
>>>>> + * supports transferring internal state for the purpose of 
>>>>> migration.
>>>>> + * Support for this feature is required for 
>>>>> vhost_set_device_state_fd()
>>>>> + * and vhost_check_device_state().
>>>>> + *
>>>>> + * @dev: The vhost device
>>>>> + *
>>>>> + * Returns true if the device supports these commands, and false 
>>>>> if it
>>>>> + * does not.
>>>>> + */
>>>>> +bool vhost_supports_migratory_state(struct vhost_dev *dev);
>>>>> +
>>>>> +/**
>>>>> + * vhost_set_device_state_fd(): Begin transfer of internal state 
>>>>> from/to
>>>>> + * the back-end for the purpose of migration.  Data is to be 
>>>>> transferred
>>>>> + * over a pipe according to @direction and @phase.  The sending 
>>>>> end must
>>>>> + * only write to the pipe, and the receiving end must only read 
>>>>> from it.
>>>>> + * Once the sending end is done, it closes its FD.  The receiving 
>>>>> end
>>>>> + * must take this as the end-of-transfer signal and close its FD, 
>>>>> too.
>>>>> + *
>>>>> + * @fd is the back-end's end of the pipe: The write FD for SAVE, 
>>>>> and the
>>>>> + * read FD for LOAD.  This function transfers ownership of @fd to 
>>>>> the
>>>>> + * back-end, i.e. closes it in the front-end.
>>>>> + *
>>>>> + * The back-end may optionally reply with an FD of its own, if this
>>>>> + * improves efficiency on its end.  In this case, the returned FD is
>>>>
>>>>
>>>> Hi Hanna,
>>>>
>>>> In what case/situation, the back-end will have a more efficient fd?
>>>
>>> Hi Hao,
>>>
>>> There is no example yet.
>>>
>>>> Here my understanding of this "FD of its own" is as same type as
>>>>
>>>> the given fd(e.g. both pipe files), why the fd from back-end makes
>>>>
>>>> difference? Do I miss anything here?
>>>
>>> Maybe it makes more sense in the context of how we came up with the 
>>> idea: Specifically, Stefan and me were asking which end should 
>>> provide the FD.  In the context of vhost-user, it makes sense to 
>>> have it be the front-end, because it controls vhost-user 
>>> communication, but that’s just the natural protocol choice, not 
>>> necessarily the most efficient one.
>>>
>>> It is imaginable that the front-end (e.g. qemu) could create a file 
>>> descriptor whose data is directly spliced (automatically) into the 
>>> migration stream, and then hand this FD to the back-end. In 
>>> practice, this doesn’t work for qemu (at this point), because it 
>>> doesn’t use simple read/write into the migration stream, but has an 
>>> abstraction layer for that.  It might be possible to make this work 
>>> in some cases, depending on what is used as a transport, but (1) not 
>>> generally, and (2) not now.  But this would be efficient.
>>
>>
>> I'm thinking one thing, we now already have a channel(a unix domain 
>> socket) between front-end and back-end, why not delivering the state 
>> file (as an fd) to/from back-end directly rathen than negotiating
>>
>> a new pipe as data channel.(since we already assume they can share fd 
>> of pipe, why not fd of normal file?)
>
> I don’t quite understand.  We do deliver the FD to the back-end 
> through this new command to the back-end, rather directly.  The 
> back-end can disagree and send another FD back, but it doesn’t have 
> to.  It can just simply always take the FD that it’s been given.
>
> The FD also doesn’t need to be a pipe, it can be any FD that the 
> back-end can read the state from (see my discussion with Stefan on 
> patch 1).  In the practical implementation, it’s difficult for qemu to 
> provide anything that is not a pipe, because the migration state can 
> be sent over many channels, and the respective FD (if any) is not 
> exposed on the high level (for devices).
>
>>
>>>
>>> The model we’d implement in qemu with this series is comparatively 
>>> not efficient, because it manually copies data from the FD (which by 
>>> default is a pipe) into the migration stream.
>>>
>>> But it is possible that the front-end can provide a zero-copy FD 
>>> into the migration stream, and for that reason we want to allow the 
>>> front-end to provide the transfer FD.
>>>
>>> In contrast, the back-end might have a more efficient implementation 
>>> on its side, too, though.  It is difficult to imagine, but it may be 
>>> possible that it has an FD already where the data needs to written 
>>> to/read from, e.g. because it’s connected to a physical device that 
>>> wants to get/send its state this way.  Admittedly, we have 
>>> absolutely no concrete example for such a back-end at this point, 
>>> but it’s hard to rule out that it is possible that there will be 
>>> back-ends that could make use of zero-copy if only they are allowed 
>>> to dictate the transfer FD.
>>>
>>> So because we in qemu can’t (at least not generally) provide an 
>>> efficient (zero-copy) implementation, we don’t want to rule out that 
>>> the back-end might be able to, so we also want to allow it to 
>>> provide the transfer FD.
>>>
>>> In the end, we decided that we don’t want to preclude either side of 
>>> providing the FD.  If one side knows it can do better than a plain 
>>> pipe with copying on both ends, it should provide the FD. That 
>>> doesn’t complicate the implementation much.
>>>
>>> (So, notably, we measure “improves efficiency” based on “is it 
>>> better than a plain pipe with copying on both ends”.  A pipe with 
>>> copying is the default implementation, but as Stefan has pointed out 
>>> in his review, it doesn’t need to be a pipe.  More efficient FDs, 
>>> like the back-end can provide in its reply, would actually likely 
>>> not be pipes.)
>>
>>
>> Yea, but the vhost-user protocol in this patchset defines these FDs 
>> as data transfer channel not the data itself, how about the latter?
>
> I don’t quite understand this either.  Perhaps you’re suggesting that 
> we should just transfer the data over the vhost-user socket. There are 
> two reasons why I don’t think we should do that: First, the vhost-user 
> socket is currently only used for a small amount of data.  For 
> example, the Rust vhost crate will reject any message longer than 4096 
> bytes.  So if I were to use it for more data, I feel like I’d be 
> abusing it.  Second, if/when we add other transfer phases (i.e. while 
> the VM is still running, to transfer larger amounts of state), it will 
> be important that the vhost-user socket is open for messages from the 
> front-end at any time.  So then, at the latest, would we need a 
> separate data channel.  I don’t feel like there is harm in making the 
> interface have this separate channel now already.
>
> Or maybe you suggest that the separate FD is OK, but it shouldn’t be 
> defined to be a data transfer channel, but just data, i.e. not 
> necessarily a pipe.  I think that’s basically the discussion I had 
> with Stefan on patch 1: That the important thing is just that the 
> receiving end can read data out of the FD until EOF, and the writing 
> end can write data and close the FD to signal the end of data. Whether 
> they actually do that is not important, only that anything they do 
> will let the other end do that.


I re-read the discussion of patch 1; I now see that what I suggested is 
already included in the protocol design.


>
> Hanna
>

Patch

diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index 31a251a9f5..e59d0b53f8 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -26,6 +26,18 @@  typedef enum VhostSetConfigType {
     VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
 } VhostSetConfigType;
 
+typedef enum VhostDeviceStateDirection {
+    /* Transfer state from back-end (device) to front-end */
+    VHOST_TRANSFER_STATE_DIRECTION_SAVE = 0,
+    /* Transfer state from front-end to back-end (device) */
+    VHOST_TRANSFER_STATE_DIRECTION_LOAD = 1,
+} VhostDeviceStateDirection;
+
+typedef enum VhostDeviceStatePhase {
+    /* The device (and all its vrings) is stopped */
+    VHOST_TRANSFER_STATE_PHASE_STOPPED = 0,
+} VhostDeviceStatePhase;
+
 struct vhost_inflight;
 struct vhost_dev;
 struct vhost_log;
@@ -133,6 +145,15 @@  typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev,
 
 typedef void (*vhost_reset_status_op)(struct vhost_dev *dev);
 
+typedef bool (*vhost_supports_migratory_state_op)(struct vhost_dev *dev);
+typedef int (*vhost_set_device_state_fd_op)(struct vhost_dev *dev,
+                                            VhostDeviceStateDirection direction,
+                                            VhostDeviceStatePhase phase,
+                                            int fd,
+                                            int *reply_fd,
+                                            Error **errp);
+typedef int (*vhost_check_device_state_op)(struct vhost_dev *dev, Error **errp);
+
 typedef struct VhostOps {
     VhostBackendType backend_type;
     vhost_backend_init vhost_backend_init;
@@ -181,6 +202,9 @@  typedef struct VhostOps {
     vhost_force_iommu_op vhost_force_iommu;
     vhost_set_config_call_op vhost_set_config_call;
     vhost_reset_status_op vhost_reset_status;
+    vhost_supports_migratory_state_op vhost_supports_migratory_state;
+    vhost_set_device_state_fd_op vhost_set_device_state_fd;
+    vhost_check_device_state_op vhost_check_device_state;
 } VhostOps;
 
 int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 69bf59d630..d8877496e5 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -346,4 +346,83 @@  int vhost_dev_set_inflight(struct vhost_dev *dev,
 int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
                            struct vhost_inflight *inflight);
 bool vhost_dev_has_iommu(struct vhost_dev *dev);
+
+/**
+ * vhost_supports_migratory_state(): Checks whether the back-end
+ * supports transferring internal state for the purpose of migration.
+ * Support for this feature is required for vhost_set_device_state_fd()
+ * and vhost_check_device_state().
+ *
+ * @dev: The vhost device
+ *
+ * Returns true if the device supports these commands, and false if it
+ * does not.
+ */
+bool vhost_supports_migratory_state(struct vhost_dev *dev);
+
+/**
+ * vhost_set_device_state_fd(): Begin transfer of internal state from/to
+ * the back-end for the purpose of migration.  Data is to be transferred
+ * over a pipe according to @direction and @phase.  The sending end must
+ * only write to the pipe, and the receiving end must only read from it.
+ * Once the sending end is done, it closes its FD.  The receiving end
+ * must take this as the end-of-transfer signal and close its FD, too.
+ *
+ * @fd is the back-end's end of the pipe: The write FD for SAVE, and the
+ * read FD for LOAD.  This function transfers ownership of @fd to the
+ * back-end, i.e. closes it in the front-end.
+ *
+ * The back-end may optionally reply with an FD of its own, if this
+ * improves efficiency on its end.  In this case, the returned FD is
+ * stored in *reply_fd.  The back-end will discard the FD sent to it,
+ * and the front-end must use *reply_fd for transferring state to/from
+ * the back-end.
+ *
+ * @dev: The vhost device
+ * @direction: The direction in which the state is to be transferred.
+ *             For outgoing migrations, this is SAVE, and data is read
+ *             from the back-end and stored by the front-end in the
+ *             migration stream.
+ *             For incoming migrations, this is LOAD, and data is read
+ *             by the front-end from the migration stream and sent to
+ *             the back-end to restore the saved state.
+ * @phase: Which migration phase we are in.  Currently, there is only
+ *         STOPPED (device and all vrings are stopped), in the future,
+ *         more phases such as PRE_COPY or POST_COPY may be added.
+ * @fd: Back-end's end of the pipe through which to transfer state; note
+ *      that ownership is transferred to the back-end, so this function
+ *      closes @fd in the front-end.
+ * @reply_fd: If the back-end wishes to use a different pipe for state
+ *            transfer, this will contain an FD for the front-end to
+ *            use.  Otherwise, -1 is stored here.
+ * @errp: Potential error description
+ *
+ * Returns 0 on success, and -errno on failure.
+ */
+int vhost_set_device_state_fd(struct vhost_dev *dev,
+                              VhostDeviceStateDirection direction,
+                              VhostDeviceStatePhase phase,
+                              int fd,
+                              int *reply_fd,
+                              Error **errp);
+
+/**
+ * vhost_check_device_state(): After transferring state from/to the
+ * back-end via vhost_set_device_state_fd(), i.e. once the sending end
+ * has closed the pipe, query the back-end to report any potential
+ * errors that have occurred on its side.  This allows detecting
+ * errors such as:
+ * - During outgoing migration, when the source side had already started
+ *   to produce its state, something went wrong and it failed to finish
+ * - During incoming migration, when the received state is somehow
+ *   invalid and cannot be processed by the back-end
+ *
+ * @dev: The vhost device
+ * @errp: Potential error description
+ *
+ * Returns 0 when the back-end reports successful state transfer and
+ * processing, and -errno when an error occurred somewhere.
+ */
+int vhost_check_device_state(struct vhost_dev *dev, Error **errp);
+
 #endif
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 53a881ec2a..8e6b5485e8 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -76,6 +76,7 @@  enum VhostUserProtocolFeature {
     VHOST_USER_PROTOCOL_F_STATUS = 16,
     /* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
     VHOST_USER_PROTOCOL_F_SUSPEND = 18,
+    VHOST_USER_PROTOCOL_F_MIGRATORY_STATE = 19,
     VHOST_USER_PROTOCOL_F_MAX
 };
 
@@ -125,6 +126,8 @@  typedef enum VhostUserRequest {
     VHOST_USER_GET_STATUS = 40,
     VHOST_USER_SUSPEND = 41,
     VHOST_USER_RESUME = 42,
+    VHOST_USER_SET_DEVICE_STATE_FD = 43,
+    VHOST_USER_CHECK_DEVICE_STATE = 44,
     VHOST_USER_MAX
 } VhostUserRequest;
 
@@ -216,6 +219,12 @@  typedef struct {
     uint32_t size; /* the following payload size */
 } QEMU_PACKED VhostUserHeader;
 
+/* Request payload of VHOST_USER_SET_DEVICE_STATE_FD */
+typedef struct VhostUserTransferDeviceState {
+    uint32_t direction;
+    uint32_t phase;
+} VhostUserTransferDeviceState;
+
 typedef union {
 #define VHOST_USER_VRING_IDX_MASK   (0xff)
 #define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
@@ -230,6 +239,7 @@  typedef union {
         VhostUserCryptoSession session;
         VhostUserVringArea area;
         VhostUserInflight inflight;
+        VhostUserTransferDeviceState transfer_state;
 } VhostUserPayload;
 
 typedef struct VhostUserMsg {
@@ -2838,6 +2848,140 @@  static void vhost_user_reset_status(struct vhost_dev *dev)
     }
 }
 
+static bool vhost_user_supports_migratory_state(struct vhost_dev *dev)
+{
+    return virtio_has_feature(dev->protocol_features,
+                              VHOST_USER_PROTOCOL_F_MIGRATORY_STATE);
+}
+
+static int vhost_user_set_device_state_fd(struct vhost_dev *dev,
+                                          VhostDeviceStateDirection direction,
+                                          VhostDeviceStatePhase phase,
+                                          int fd,
+                                          int *reply_fd,
+                                          Error **errp)
+{
+    int ret;
+    struct vhost_user *vu = dev->opaque;
+    VhostUserMsg msg = {
+        .hdr = {
+            .request = VHOST_USER_SET_DEVICE_STATE_FD,
+            .flags = VHOST_USER_VERSION,
+            .size = sizeof(msg.payload.transfer_state),
+        },
+        .payload.transfer_state = {
+            .direction = direction,
+            .phase = phase,
+        },
+    };
+
+    *reply_fd = -1;
+
+    if (!vhost_user_supports_migratory_state(dev)) {
+        close(fd);
+        error_setg(errp, "Back-end does not support migration state transfer");
+        return -ENOTSUP;
+    }
+
+    ret = vhost_user_write(dev, &msg, &fd, 1);
+    close(fd);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret,
+                         "Failed to send SET_DEVICE_STATE_FD message");
+        return ret;
+    }
+
+    ret = vhost_user_read(dev, &msg);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret,
+                         "Failed to receive SET_DEVICE_STATE_FD reply");
+        return ret;
+    }
+
+    if (msg.hdr.request != VHOST_USER_SET_DEVICE_STATE_FD) {
+        error_setg(errp,
+                   "Received unexpected message type, expected %d, received %d",
+                   VHOST_USER_SET_DEVICE_STATE_FD, msg.hdr.request);
+        return -EPROTO;
+    }
+
+    if (msg.hdr.size != sizeof(msg.payload.u64)) {
+        error_setg(errp,
+                   "Received bad message size, expected %zu, received %" PRIu32,
+                   sizeof(msg.payload.u64), msg.hdr.size);
+        return -EPROTO;
+    }
+
+    if ((msg.payload.u64 & 0xff) != 0) {
+        error_setg(errp, "Back-end did not accept migration state transfer");
+        return -EIO;
+    }
+
+    if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)) {
+        *reply_fd = qemu_chr_fe_get_msgfd(vu->user->chr);
+        if (*reply_fd < 0) {
+            error_setg(errp,
+                       "Failed to get back-end-provided transfer pipe FD");
+            *reply_fd = -1;
+            return -EIO;
+        }
+    }
+
+    return 0;
+}
+
+static int vhost_user_check_device_state(struct vhost_dev *dev, Error **errp)
+{
+    int ret;
+    VhostUserMsg msg = {
+        .hdr = {
+            .request = VHOST_USER_CHECK_DEVICE_STATE,
+            .flags = VHOST_USER_VERSION,
+            .size = 0,
+        },
+    };
+
+    if (!vhost_user_supports_migratory_state(dev)) {
+        error_setg(errp, "Back-end does not support migration state transfer");
+        return -ENOTSUP;
+    }
+
+    ret = vhost_user_write(dev, &msg, NULL, 0);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret,
+                         "Failed to send CHECK_DEVICE_STATE message");
+        return ret;
+    }
+
+    ret = vhost_user_read(dev, &msg);
+    if (ret < 0) {
+        error_setg_errno(errp, -ret,
+                         "Failed to receive CHECK_DEVICE_STATE reply");
+        return ret;
+    }
+
+    if (msg.hdr.request != VHOST_USER_CHECK_DEVICE_STATE) {
+        error_setg(errp,
+                   "Received unexpected message type, expected %d, received %d",
+                   VHOST_USER_CHECK_DEVICE_STATE, msg.hdr.request);
+        return -EPROTO;
+    }
+
+    if (msg.hdr.size != sizeof(msg.payload.u64)) {
+        error_setg(errp,
+                   "Received bad message size, expected %zu, received %" PRIu32,
+                   sizeof(msg.payload.u64), msg.hdr.size);
+        return -EPROTO;
+    }
+
+    if (msg.payload.u64 != 0) {
+        error_setg(errp, "Back-end failed to process its internal state");
+        return -EIO;
+    }
+
+    return 0;
+}
+
 const VhostOps user_ops = {
         .backend_type = VHOST_BACKEND_TYPE_USER,
         .vhost_backend_init = vhost_user_backend_init,
@@ -2874,4 +3018,7 @@  const VhostOps user_ops = {
         .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
         .vhost_dev_start = vhost_user_dev_start,
         .vhost_reset_status = vhost_user_reset_status,
+        .vhost_supports_migratory_state = vhost_user_supports_migratory_state,
+        .vhost_set_device_state_fd = vhost_user_set_device_state_fd,
+        .vhost_check_device_state = vhost_user_check_device_state,
 };
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 2e28e58da7..756b6d55a8 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -2091,3 +2091,40 @@  int vhost_net_set_backend(struct vhost_dev *hdev,
 
     return -ENOSYS;
 }
+
+bool vhost_supports_migratory_state(struct vhost_dev *dev)
+{
+    if (dev->vhost_ops->vhost_supports_migratory_state) {
+        return dev->vhost_ops->vhost_supports_migratory_state(dev);
+    }
+
+    return false;
+}
+
+int vhost_set_device_state_fd(struct vhost_dev *dev,
+                              VhostDeviceStateDirection direction,
+                              VhostDeviceStatePhase phase,
+                              int fd,
+                              int *reply_fd,
+                              Error **errp)
+{
+    if (dev->vhost_ops->vhost_set_device_state_fd) {
+        return dev->vhost_ops->vhost_set_device_state_fd(dev, direction, phase,
+                                                         fd, reply_fd, errp);
+    }
+
+    error_setg(errp,
+               "vhost transport does not support migration state transfer");
+    return -ENOSYS;
+}
+
+int vhost_check_device_state(struct vhost_dev *dev, Error **errp)
+{
+    if (dev->vhost_ops->vhost_check_device_state) {
+        return dev->vhost_ops->vhost_check_device_state(dev, errp);
+    }
+
+    error_setg(errp,
+               "vhost transport does not support migration state transfer");
+    return -ENOSYS;
+}
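
For reference, a rough sketch of how a front-end caller might drive the new
helpers during an outgoing migration (save_backend_state() and the
consume_state() callback are invented for illustration; the actual QEMU
integration is not part of this patch): create a pipe, hand the write end to
the back-end with vhost_set_device_state_fd(), read the state from the read
end until EOF, and finally call vhost_check_device_state() to learn whether
the back-end produced its state successfully.

/*
 * Rough sketch only -- not taken from this series.  The pipe handling
 * and the consume_state() callback are assumptions for the example.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"

static int save_backend_state(struct vhost_dev *dev,
                              int (*consume_state)(const void *buf, size_t len,
                                                   void *opaque),
                              void *opaque, Error **errp)
{
    int pipe_fds[2], read_fd, reply_fd = -1;
    int ret;

    if (!vhost_supports_migratory_state(dev)) {
        error_setg(errp, "Back-end does not support migration state transfer");
        return -ENOTSUP;
    }

    if (pipe(pipe_fds) < 0) {
        ret = -errno;
        error_setg_errno(errp, -ret, "Failed to create transfer pipe");
        return ret;
    }
    read_fd = pipe_fds[0];

    /*
     * Hand the write end to the back-end; ownership of pipe_fds[1] is
     * transferred by this call, i.e. it is closed in the front-end.
     */
    ret = vhost_set_device_state_fd(dev, VHOST_TRANSFER_STATE_DIRECTION_SAVE,
                                    VHOST_TRANSFER_STATE_PHASE_STOPPED,
                                    pipe_fds[1], &reply_fd, errp);
    if (ret < 0) {
        close(read_fd);
        return ret;
    }
    if (reply_fd >= 0) {
        /* The back-end provided its own channel; use it instead */
        close(read_fd);
        read_fd = reply_fd;
    }

    /* Read the back-end's state until it closes its end of the pipe */
    for (;;) {
        char buf[4096];
        ssize_t n = read(read_fd, buf, sizeof(buf));

        if (n < 0) {
            if (errno == EINTR) {
                continue;
            }
            ret = -errno;
            error_setg_errno(errp, -ret, "Failed to read back-end state");
            close(read_fd);
            return ret;
        }
        if (n == 0) {
            break; /* EOF: the back-end is done producing its state */
        }

        ret = consume_state(buf, n, opaque);
        if (ret < 0) {
            error_setg(errp, "Failed to store back-end state");
            close(read_fd);
            return ret;
        }
    }
    close(read_fd);

    /* Ask the back-end whether it was able to produce its state */
    return vhost_check_device_state(dev, errp);
}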