
[v4,4/8] migration/multifd: add qpl compression method

Message ID 20240304140028.1590649-5-yuan1.liu@intel.com (mailing list archive)
State New, archived
Series Live Migration With IAA

Commit Message

Yuan Liu March 4, 2024, 2 p.m. UTC
add the Query Processing Library (QPL) compression method

Although both qpl and zlib support deflate compression, qpl only
uses the In-Memory Analytics Accelerator (IAA) for compression and
decompression, and IAA is not compatible with the zlib implementation
used in migration, so qpl is added as a new compression method for
migration.

How to enable qpl compression during migration:
migrate_set_parameter multifd-compression qpl

qpl only supports one compression level, so no qpl compression
level parameter is added and users do not need to specify a
compression level.

Signed-off-by: Yuan Liu <yuan1.liu@intel.com>
Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
---
 hw/core/qdev-properties-system.c |   2 +-
 migration/meson.build            |   1 +
 migration/multifd-qpl.c          | 158 +++++++++++++++++++++++++++++++
 migration/multifd.h              |   1 +
 qapi/migration.json              |   7 +-
 5 files changed, 167 insertions(+), 2 deletions(-)
 create mode 100644 migration/multifd-qpl.c

Comments

Fabiano Rosas March 5, 2024, 8:58 p.m. UTC | #1
Yuan Liu <yuan1.liu@intel.com> writes:

> add the Query Processing Library (QPL) compression method
>
> Although both qpl and zlib support deflate compression, qpl will
> only use the In-Memory Analytics Accelerator(IAA) for compression
> and decompression, and IAA is not compatible with the Zlib in
> migration, so qpl is used as a new compression method for migration.
>
> How to enable qpl compression during migration:
> migrate_set_parameter multifd-compression qpl
>
> The qpl only supports one compression level, there is no qpl
> compression level parameter added, users do not need to specify
> the qpl compression level.
>
> Signed-off-by: Yuan Liu <yuan1.liu@intel.com>
> Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
> ---
>  hw/core/qdev-properties-system.c |   2 +-
>  migration/meson.build            |   1 +
>  migration/multifd-qpl.c          | 158 +++++++++++++++++++++++++++++++
>  migration/multifd.h              |   1 +
>  qapi/migration.json              |   7 +-
>  5 files changed, 167 insertions(+), 2 deletions(-)
>  create mode 100644 migration/multifd-qpl.c
>
> diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
> index 1a396521d5..b4f0e5cbdb 100644
> --- a/hw/core/qdev-properties-system.c
> +++ b/hw/core/qdev-properties-system.c
> @@ -658,7 +658,7 @@ const PropertyInfo qdev_prop_fdc_drive_type = {
>  const PropertyInfo qdev_prop_multifd_compression = {
>      .name = "MultiFDCompression",
>      .description = "multifd_compression values, "
> -                   "none/zlib/zstd",
> +                   "none/zlib/zstd/qpl",
>      .enum_table = &MultiFDCompression_lookup,
>      .get = qdev_propinfo_get_enum,
>      .set = qdev_propinfo_set_enum,
> diff --git a/migration/meson.build b/migration/meson.build
> index 92b1cc4297..c155c2d781 100644
> --- a/migration/meson.build
> +++ b/migration/meson.build
> @@ -40,6 +40,7 @@ if get_option('live_block_migration').allowed()
>    system_ss.add(files('block.c'))
>  endif
>  system_ss.add(when: zstd, if_true: files('multifd-zstd.c'))
> +system_ss.add(when: qpl, if_true: files('multifd-qpl.c'))
>  
>  specific_ss.add(when: 'CONFIG_SYSTEM_ONLY',
>                  if_true: files('ram.c',
> diff --git a/migration/multifd-qpl.c b/migration/multifd-qpl.c
> new file mode 100644
> index 0000000000..6b94e732ac
> --- /dev/null
> +++ b/migration/multifd-qpl.c
> @@ -0,0 +1,158 @@
> +/*
> + * Multifd qpl compression accelerator implementation
> + *
> + * Copyright (c) 2023 Intel Corporation
> + *
> + * Authors:
> + *  Yuan Liu<yuan1.liu@intel.com>
> + *
> + * This work is licensed under the terms of the GNU GPL, version 2 or later.
> + * See the COPYING file in the top-level directory.
> + */
> +
> +#include "qemu/osdep.h"
> +#include "qemu/rcu.h"
> +#include "exec/ramblock.h"
> +#include "exec/target_page.h"
> +#include "qapi/error.h"
> +#include "migration.h"
> +#include "trace.h"
> +#include "options.h"
> +#include "multifd.h"
> +#include "qpl/qpl.h"

I don't mind adding a skeleton upfront before adding the implementation,
but adding the headers here hurts the review process. Reviewers will
have to go digging through the next patches to be able to validate each
of these. It's better to include them along with their usage.

What I would do in this patch is maybe just add the new option, the
.json and meson changes and this file with just:

static void multifd_qpl_register(void)
{
    /* noop */
}

Then in the next commit you can implement all the methods in one
go. That way, the docstrings come along with the implementation, which
also facilitates review.
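
A full stub along those lines might look like this (just a sketch; the
qemu/module.h include for migration_init() is an assumption, keep
whatever headers the real implementation actually needs):

/* migration/multifd-qpl.c -- placeholder until the real methods land */
#include "qemu/osdep.h"
#include "qemu/module.h"   /* assumed home of migration_init(); adjust if needed */

static void multifd_qpl_register(void)
{
    /* noop, multifd_register_ops() gets wired up in the implementation patch */
}

migration_init(multifd_qpl_register);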

> +
> +struct qpl_data {

typedef struct {} QplData/QPLData, following QEMU's coding style.

> +    qpl_job **job_array;
> +    /* the number of allocated jobs */
> +    uint32_t job_num;
> +    /* the size of data processed by a qpl job */
> +    uint32_t data_size;
> +    /* compressed data buffer */
> +    uint8_t *zbuf;
> +    /* the length of compressed data */
> +    uint32_t *zbuf_hdr;
> +};
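
A rename following that suggestion might look like this (a sketch
keeping the fields from the patch unchanged):

typedef struct {
    qpl_job **job_array;
    /* the number of allocated jobs */
    uint32_t job_num;
    /* the size of data processed by a qpl job */
    uint32_t data_size;
    /* compressed data buffer */
    uint8_t *zbuf;
    /* the length of compressed data */
    uint32_t *zbuf_hdr;
} QplData;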
> +
> +/**
> + * qpl_send_setup: setup send side
> + *
> + * Setup each channel with QPL compression.
> + *
> + * Returns 0 for success or -1 for error
> + *
> + * @p: Params for the channel that we are using
> + * @errp: pointer to an error
> + */
> +static int qpl_send_setup(MultiFDSendParams *p, Error **errp)
> +{
> +    /* Implement in next patch */
> +    return -1;
> +}
> +
> +/**
> + * qpl_send_cleanup: cleanup send side
> + *
> + * Close the channel and return memory.
> + *
> + * @p: Params for the channel that we are using
> + * @errp: pointer to an error
> + */
> +static void qpl_send_cleanup(MultiFDSendParams *p, Error **errp)
> +{
> +    /* Implement in next patch */
> +}
> +
> +/**
> + * qpl_send_prepare: prepare data to be able to send
> + *
> + * Create a compressed buffer with all the pages that we are going to
> + * send.
> + *
> + * Returns 0 for success or -1 for error
> + *
> + * @p: Params for the channel that we are using
> + * @errp: pointer to an error
> + */
> +static int qpl_send_prepare(MultiFDSendParams *p, Error **errp)
> +{
> +    /* Implement in next patch */
> +    return -1;
> +}
> +
> +/**
> + * qpl_recv_setup: setup receive side
> + *
> + * Create the compressed channel and buffer.
> + *
> + * Returns 0 for success or -1 for error
> + *
> + * @p: Params for the channel that we are using
> + * @errp: pointer to an error
> + */
> +static int qpl_recv_setup(MultiFDRecvParams *p, Error **errp)
> +{
> +    /* Implement in next patch */
> +    return -1;
> +}
> +
> +/**
> + * qpl_recv_cleanup: setup receive side
> + *
> + * Close the channel and return memory.
> + *
> + * @p: Params for the channel that we are using
> + */
> +static void qpl_recv_cleanup(MultiFDRecvParams *p)
> +{
> +    /* Implement in next patch */
> +}
> +
> +/**
> + * qpl_recv_pages: read the data from the channel into actual pages
> + *
> + * Read the compressed buffer, and uncompress it into the actual
> + * pages.
> + *
> + * Returns 0 for success or -1 for error
> + *
> + * @p: Params for the channel that we are using
> + * @errp: pointer to an error
> + */
> +static int qpl_recv_pages(MultiFDRecvParams *p, Error **errp)
> +{
> +    /* Implement in next patch */
> +    return -1;
> +}
> +
> +/**
> + * qpl_get_iov_count: get the count of IOVs
> + *
> + * For QPL compression, in addition to requesting the same number of IOVs
> + * as the page, it also requires an additional IOV to store all compressed
> + * data lengths.
> + *
> + * Returns the count of the IOVs
> + *
> + * @page_count: Indicate the maximum count of pages processed by multifd
> + */
> +static uint32_t qpl_get_iov_count(uint32_t page_count)
> +{
> +    return page_count + 1;
> +}
> +
> +static MultiFDMethods multifd_qpl_ops = {
> +    .send_setup = qpl_send_setup,
> +    .send_cleanup = qpl_send_cleanup,
> +    .send_prepare = qpl_send_prepare,
> +    .recv_setup = qpl_recv_setup,
> +    .recv_cleanup = qpl_recv_cleanup,
> +    .recv_pages = qpl_recv_pages,
> +    .get_iov_count = qpl_get_iov_count
> +};
> +
> +static void multifd_qpl_register(void)
> +{
> +    multifd_register_ops(MULTIFD_COMPRESSION_QPL, &multifd_qpl_ops);
> +}
> +
> +migration_init(multifd_qpl_register);
> diff --git a/migration/multifd.h b/migration/multifd.h
> index d82495c508..0e9361df2a 100644
> --- a/migration/multifd.h
> +++ b/migration/multifd.h
> @@ -33,6 +33,7 @@ bool multifd_queue_page(RAMBlock *block, ram_addr_t offset);
>  #define MULTIFD_FLAG_NOCOMP (0 << 1)
>  #define MULTIFD_FLAG_ZLIB (1 << 1)
>  #define MULTIFD_FLAG_ZSTD (2 << 1)
> +#define MULTIFD_FLAG_QPL (4 << 1)
>  
>  /* This value needs to be a multiple of qemu_target_page_size() */
>  #define MULTIFD_PACKET_SIZE (512 * 1024)
> diff --git a/qapi/migration.json b/qapi/migration.json
> index 5a565d9b8d..e48e3d7065 100644
> --- a/qapi/migration.json
> +++ b/qapi/migration.json
> @@ -625,11 +625,16 @@
>  #
>  # @zstd: use zstd compression method.
>  #
> +# @qpl: use qpl compression method. Query Processing Library(qpl) is based on
> +#       the deflate compression algorithm and use the Intel In-Memory Analytics
> +#       Accelerator(IAA) hardware accelerated compression and decompression.

Missing: (since 9.0)

> +#
>  # Since: 5.0
>  ##
>  { 'enum': 'MultiFDCompression',
>    'data': [ 'none', 'zlib',
> -            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
> +            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' },
> +            { 'name': 'qpl', 'if': 'CONFIG_QPL' } ] }
>  
>  ##
>  # @MigMode:
Yuan Liu March 6, 2024, 2:29 a.m. UTC | #2
> -----Original Message-----
> From: Fabiano Rosas <farosas@suse.de>
> Sent: Wednesday, March 6, 2024 4:58 AM
> To: Liu, Yuan1 <yuan1.liu@intel.com>; peterx@redhat.com
> Cc: qemu-devel@nongnu.org; hao.xiang@bytedance.com;
> bryan.zhang@bytedance.com; Liu, Yuan1 <yuan1.liu@intel.com>; Zou, Nanhai
> <nanhai.zou@intel.com>
> Subject: Re: [PATCH v4 4/8] migration/multifd: add qpl compression method
> 
> Yuan Liu <yuan1.liu@intel.com> writes:
> 
> > add the Query Processing Library (QPL) compression method
> >
> > Although both qpl and zlib support deflate compression, qpl will
> > only use the In-Memory Analytics Accelerator(IAA) for compression
> > and decompression, and IAA is not compatible with the Zlib in
> > migration, so qpl is used as a new compression method for migration.
> >
> > How to enable qpl compression during migration:
> > migrate_set_parameter multifd-compression qpl
> >
> > The qpl only supports one compression level, there is no qpl
> > compression level parameter added, users do not need to specify
> > the qpl compression level.
> >
> > Signed-off-by: Yuan Liu <yuan1.liu@intel.com>
> > Reviewed-by: Nanhai Zou <nanhai.zou@intel.com>
> > ---
> >  hw/core/qdev-properties-system.c |   2 +-
> >  migration/meson.build            |   1 +
> >  migration/multifd-qpl.c          | 158 +++++++++++++++++++++++++++++++
> >  migration/multifd.h              |   1 +
> >  qapi/migration.json              |   7 +-
> >  5 files changed, 167 insertions(+), 2 deletions(-)
> >  create mode 100644 migration/multifd-qpl.c
> >
> > diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-
> system.c
> > index 1a396521d5..b4f0e5cbdb 100644
> > --- a/hw/core/qdev-properties-system.c
> > +++ b/hw/core/qdev-properties-system.c
> > @@ -658,7 +658,7 @@ const PropertyInfo qdev_prop_fdc_drive_type = {
> >  const PropertyInfo qdev_prop_multifd_compression = {
> >      .name = "MultiFDCompression",
> >      .description = "multifd_compression values, "
> > -                   "none/zlib/zstd",
> > +                   "none/zlib/zstd/qpl",
> >      .enum_table = &MultiFDCompression_lookup,
> >      .get = qdev_propinfo_get_enum,
> >      .set = qdev_propinfo_set_enum,
> > diff --git a/migration/meson.build b/migration/meson.build
> > index 92b1cc4297..c155c2d781 100644
> > --- a/migration/meson.build
> > +++ b/migration/meson.build
> > @@ -40,6 +40,7 @@ if get_option('live_block_migration').allowed()
> >    system_ss.add(files('block.c'))
> >  endif
> >  system_ss.add(when: zstd, if_true: files('multifd-zstd.c'))
> > +system_ss.add(when: qpl, if_true: files('multifd-qpl.c'))
> >
> >  specific_ss.add(when: 'CONFIG_SYSTEM_ONLY',
> >                  if_true: files('ram.c',
> > diff --git a/migration/multifd-qpl.c b/migration/multifd-qpl.c
> > new file mode 100644
> > index 0000000000..6b94e732ac
> > --- /dev/null
> > +++ b/migration/multifd-qpl.c
> > @@ -0,0 +1,158 @@
> > +/*
> > + * Multifd qpl compression accelerator implementation
> > + *
> > + * Copyright (c) 2023 Intel Corporation
> > + *
> > + * Authors:
> > + *  Yuan Liu<yuan1.liu@intel.com>
> > + *
> > + * This work is licensed under the terms of the GNU GPL, version 2 or
> later.
> > + * See the COPYING file in the top-level directory.
> > + */
> > +
> > +#include "qemu/osdep.h"
> > +#include "qemu/rcu.h"
> > +#include "exec/ramblock.h"
> > +#include "exec/target_page.h"
> > +#include "qapi/error.h"
> > +#include "migration.h"
> > +#include "trace.h"
> > +#include "options.h"
> > +#include "multifd.h"
> > +#include "qpl/qpl.h"
> 
> I don't mind adding a skeleton upfront before adding the implementation,
> but adding the headers here hurts the review process. Reviewers will
> have to go digging through the next patches to be able to validate each
> of these. It's better to include them along with their usage.
> 
> What I would do in this patch is maybe just add the new option, the
> .json and meson changes and this file with just:
> 
> static void multifd_qpl_register(void)
> {
>     /* noop */
> }
> 
> Then in the next commit you can implement all the methods in one
> go. That way, the docstrings come along with the implementation, which
> also facilitates review.

Thanks for the guidance, I will implement it in the next version.

> > +
> > +struct qpl_data {
> 
> typedef struct {} QplData/QPLData, following QEMU's coding style.

I will fix it in the next version

> > +    qpl_job **job_array;
> > +    /* the number of allocated jobs */
> > +    uint32_t job_num;
> > +    /* the size of data processed by a qpl job */
> > +    uint32_t data_size;
> > +    /* compressed data buffer */
> > +    uint8_t *zbuf;
> > +    /* the length of compressed data */
> > +    uint32_t *zbuf_hdr;
> > +};
> > +
> > +/**
> > + * qpl_send_setup: setup send side
> > + *
> > + * Setup each channel with QPL compression.
> > + *
> > + * Returns 0 for success or -1 for error
> > + *
> > + * @p: Params for the channel that we are using
> > + * @errp: pointer to an error
> > + */
> > +static int qpl_send_setup(MultiFDSendParams *p, Error **errp)
> > +{
> > +    /* Implement in next patch */
> > +    return -1;
> > +}
> > +
> > +/**
> > + * qpl_send_cleanup: cleanup send side
> > + *
> > + * Close the channel and return memory.
> > + *
> > + * @p: Params for the channel that we are using
> > + * @errp: pointer to an error
> > + */
> > +static void qpl_send_cleanup(MultiFDSendParams *p, Error **errp)
> > +{
> > +    /* Implement in next patch */
> > +}
> > +
> > +/**
> > + * qpl_send_prepare: prepare data to be able to send
> > + *
> > + * Create a compressed buffer with all the pages that we are going to
> > + * send.
> > + *
> > + * Returns 0 for success or -1 for error
> > + *
> > + * @p: Params for the channel that we are using
> > + * @errp: pointer to an error
> > + */
> > +static int qpl_send_prepare(MultiFDSendParams *p, Error **errp)
> > +{
> > +    /* Implement in next patch */
> > +    return -1;
> > +}
> > +
> > +/**
> > + * qpl_recv_setup: setup receive side
> > + *
> > + * Create the compressed channel and buffer.
> > + *
> > + * Returns 0 for success or -1 for error
> > + *
> > + * @p: Params for the channel that we are using
> > + * @errp: pointer to an error
> > + */
> > +static int qpl_recv_setup(MultiFDRecvParams *p, Error **errp)
> > +{
> > +    /* Implement in next patch */
> > +    return -1;
> > +}
> > +
> > +/**
> > + * qpl_recv_cleanup: setup receive side
> > + *
> > + * Close the channel and return memory.
> > + *
> > + * @p: Params for the channel that we are using
> > + */
> > +static void qpl_recv_cleanup(MultiFDRecvParams *p)
> > +{
> > +    /* Implement in next patch */
> > +}
> > +
> > +/**
> > + * qpl_recv_pages: read the data from the channel into actual pages
> > + *
> > + * Read the compressed buffer, and uncompress it into the actual
> > + * pages.
> > + *
> > + * Returns 0 for success or -1 for error
> > + *
> > + * @p: Params for the channel that we are using
> > + * @errp: pointer to an error
> > + */
> > +static int qpl_recv_pages(MultiFDRecvParams *p, Error **errp)
> > +{
> > +    /* Implement in next patch */
> > +    return -1;
> > +}
> > +
> > +/**
> > + * qpl_get_iov_count: get the count of IOVs
> > + *
> > + * For QPL compression, in addition to requesting the same number of
> IOVs
> > + * as the page, it also requires an additional IOV to store all
> compressed
> > + * data lengths.
> > + *
> > + * Returns the count of the IOVs
> > + *
> > + * @page_count: Indicate the maximum count of pages processed by
> multifd
> > + */
> > +static uint32_t qpl_get_iov_count(uint32_t page_count)
> > +{
> > +    return page_count + 1;
> > +}
> > +
> > +static MultiFDMethods multifd_qpl_ops = {
> > +    .send_setup = qpl_send_setup,
> > +    .send_cleanup = qpl_send_cleanup,
> > +    .send_prepare = qpl_send_prepare,
> > +    .recv_setup = qpl_recv_setup,
> > +    .recv_cleanup = qpl_recv_cleanup,
> > +    .recv_pages = qpl_recv_pages,
> > +    .get_iov_count = qpl_get_iov_count
> > +};
> > +
> > +static void multifd_qpl_register(void)
> > +{
> > +    multifd_register_ops(MULTIFD_COMPRESSION_QPL, &multifd_qpl_ops);
> > +}
> > +
> > +migration_init(multifd_qpl_register);
> > diff --git a/migration/multifd.h b/migration/multifd.h
> > index d82495c508..0e9361df2a 100644
> > --- a/migration/multifd.h
> > +++ b/migration/multifd.h
> > @@ -33,6 +33,7 @@ bool multifd_queue_page(RAMBlock *block, ram_addr_t
> offset);
> >  #define MULTIFD_FLAG_NOCOMP (0 << 1)
> >  #define MULTIFD_FLAG_ZLIB (1 << 1)
> >  #define MULTIFD_FLAG_ZSTD (2 << 1)
> > +#define MULTIFD_FLAG_QPL (4 << 1)
> >
> >  /* This value needs to be a multiple of qemu_target_page_size() */
> >  #define MULTIFD_PACKET_SIZE (512 * 1024)
> > diff --git a/qapi/migration.json b/qapi/migration.json
> > index 5a565d9b8d..e48e3d7065 100644
> > --- a/qapi/migration.json
> > +++ b/qapi/migration.json
> > @@ -625,11 +625,16 @@
> >  #
> >  # @zstd: use zstd compression method.
> >  #
> > +# @qpl: use qpl compression method. Query Processing Library(qpl) is
> based on
> > +#       the deflate compression algorithm and use the Intel In-Memory
> Analytics
> > +#       Accelerator(IAA) hardware accelerated compression and
> decompression.
> 
> Missing: (since 9.0)

Sure, I will add it in the next version

> > +#
> >  # Since: 5.0
> >  ##
> >  { 'enum': 'MultiFDCompression',
> >    'data': [ 'none', 'zlib',
> > -            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
> > +            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' },
> > +            { 'name': 'qpl', 'if': 'CONFIG_QPL' } ] }
> >
> >  ##
> >  # @MigMode:

Patch

diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c
index 1a396521d5..b4f0e5cbdb 100644
--- a/hw/core/qdev-properties-system.c
+++ b/hw/core/qdev-properties-system.c
@@ -658,7 +658,7 @@  const PropertyInfo qdev_prop_fdc_drive_type = {
 const PropertyInfo qdev_prop_multifd_compression = {
     .name = "MultiFDCompression",
     .description = "multifd_compression values, "
-                   "none/zlib/zstd",
+                   "none/zlib/zstd/qpl",
     .enum_table = &MultiFDCompression_lookup,
     .get = qdev_propinfo_get_enum,
     .set = qdev_propinfo_set_enum,
diff --git a/migration/meson.build b/migration/meson.build
index 92b1cc4297..c155c2d781 100644
--- a/migration/meson.build
+++ b/migration/meson.build
@@ -40,6 +40,7 @@  if get_option('live_block_migration').allowed()
   system_ss.add(files('block.c'))
 endif
 system_ss.add(when: zstd, if_true: files('multifd-zstd.c'))
+system_ss.add(when: qpl, if_true: files('multifd-qpl.c'))
 
 specific_ss.add(when: 'CONFIG_SYSTEM_ONLY',
                 if_true: files('ram.c',
diff --git a/migration/multifd-qpl.c b/migration/multifd-qpl.c
new file mode 100644
index 0000000000..6b94e732ac
--- /dev/null
+++ b/migration/multifd-qpl.c
@@ -0,0 +1,158 @@ 
+/*
+ * Multifd qpl compression accelerator implementation
+ *
+ * Copyright (c) 2023 Intel Corporation
+ *
+ * Authors:
+ *  Yuan Liu<yuan1.liu@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/rcu.h"
+#include "exec/ramblock.h"
+#include "exec/target_page.h"
+#include "qapi/error.h"
+#include "migration.h"
+#include "trace.h"
+#include "options.h"
+#include "multifd.h"
+#include "qpl/qpl.h"
+
+struct qpl_data {
+    qpl_job **job_array;
+    /* the number of allocated jobs */
+    uint32_t job_num;
+    /* the size of data processed by a qpl job */
+    uint32_t data_size;
+    /* compressed data buffer */
+    uint8_t *zbuf;
+    /* the length of compressed data */
+    uint32_t *zbuf_hdr;
+};
+
+/**
+ * qpl_send_setup: setup send side
+ *
+ * Setup each channel with QPL compression.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static int qpl_send_setup(MultiFDSendParams *p, Error **errp)
+{
+    /* Implement in next patch */
+    return -1;
+}
+
+/**
+ * qpl_send_cleanup: cleanup send side
+ *
+ * Close the channel and return memory.
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static void qpl_send_cleanup(MultiFDSendParams *p, Error **errp)
+{
+    /* Implement in next patch */
+}
+
+/**
+ * qpl_send_prepare: prepare data to be able to send
+ *
+ * Create a compressed buffer with all the pages that we are going to
+ * send.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static int qpl_send_prepare(MultiFDSendParams *p, Error **errp)
+{
+    /* Implement in next patch */
+    return -1;
+}
+
+/**
+ * qpl_recv_setup: setup receive side
+ *
+ * Create the compressed channel and buffer.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static int qpl_recv_setup(MultiFDRecvParams *p, Error **errp)
+{
+    /* Implement in next patch */
+    return -1;
+}
+
+/**
+ * qpl_recv_cleanup: cleanup receive side
+ *
+ * Close the channel and return memory.
+ *
+ * @p: Params for the channel that we are using
+ */
+static void qpl_recv_cleanup(MultiFDRecvParams *p)
+{
+    /* Implement in next patch */
+}
+
+/**
+ * qpl_recv_pages: read the data from the channel into actual pages
+ *
+ * Read the compressed buffer, and uncompress it into the actual
+ * pages.
+ *
+ * Returns 0 for success or -1 for error
+ *
+ * @p: Params for the channel that we are using
+ * @errp: pointer to an error
+ */
+static int qpl_recv_pages(MultiFDRecvParams *p, Error **errp)
+{
+    /* Implement in next patch */
+    return -1;
+}
+
+/**
+ * qpl_get_iov_count: get the count of IOVs
+ *
+ * For QPL compression, in addition to requesting the same number of IOVs
+ * as the page, it also requires an additional IOV to store all compressed
+ * data lengths.
+ *
+ * Returns the count of the IOVs
+ *
+ * @page_count: Indicate the maximum count of pages processed by multifd
+ */
+static uint32_t qpl_get_iov_count(uint32_t page_count)
+{
+    return page_count + 1;
+}
+
+static MultiFDMethods multifd_qpl_ops = {
+    .send_setup = qpl_send_setup,
+    .send_cleanup = qpl_send_cleanup,
+    .send_prepare = qpl_send_prepare,
+    .recv_setup = qpl_recv_setup,
+    .recv_cleanup = qpl_recv_cleanup,
+    .recv_pages = qpl_recv_pages,
+    .get_iov_count = qpl_get_iov_count
+};
+
+static void multifd_qpl_register(void)
+{
+    multifd_register_ops(MULTIFD_COMPRESSION_QPL, &multifd_qpl_ops);
+}
+
+migration_init(multifd_qpl_register);
diff --git a/migration/multifd.h b/migration/multifd.h
index d82495c508..0e9361df2a 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -33,6 +33,7 @@  bool multifd_queue_page(RAMBlock *block, ram_addr_t offset);
 #define MULTIFD_FLAG_NOCOMP (0 << 1)
 #define MULTIFD_FLAG_ZLIB (1 << 1)
 #define MULTIFD_FLAG_ZSTD (2 << 1)
+#define MULTIFD_FLAG_QPL (4 << 1)
 
 /* This value needs to be a multiple of qemu_target_page_size() */
 #define MULTIFD_PACKET_SIZE (512 * 1024)
diff --git a/qapi/migration.json b/qapi/migration.json
index 5a565d9b8d..e48e3d7065 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -625,11 +625,16 @@ 
 #
 # @zstd: use zstd compression method.
 #
+# @qpl: use qpl compression method. Query Processing Library (qpl) is based on
+#       the deflate compression algorithm and uses the Intel In-Memory Analytics
+#       Accelerator (IAA) to accelerate compression and decompression in hardware.
+#
 # Since: 5.0
 ##
 { 'enum': 'MultiFDCompression',
   'data': [ 'none', 'zlib',
-            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] }
+            { 'name': 'zstd', 'if': 'CONFIG_ZSTD' },
+            { 'name': 'qpl', 'if': 'CONFIG_QPL' } ] }
 
 ##
 # @MigMode: