
[v3,3/4] migration: Support periodic ramblock dirty sync

Message ID f1067c9ddca005629e64d7e77c98686612bb1f82.1729064919.git.yong.huang@smartx.com
State New
Series migration: auto-converge refinements for huge VM

Commit Message

Yong Huang Oct. 16, 2024, 7:56 a.m. UTC
From: Hyman Huang <yong.huang@smartx.com>

When a VM is configured with huge memory, the current throttle logic
doesn't appear to scale, because migration_trigger_throttle() is only
called once per iteration, so it won't be invoked for a long time if a
single iteration takes a long time.

The periodic dirty sync aims to fix the above issue by synchronizing
the ramblock from the remote dirty bitmap and, when necessary, triggering
the CPU throttle multiple times during a long iteration.

This is a trade-off between synchronization overhead and CPU throttle
impact.

Signed-off-by: Hyman Huang <yong.huang@smartx.com>
---
 migration/cpu-throttle.c | 70 +++++++++++++++++++++++++++++++++++++++-
 migration/cpu-throttle.h | 14 ++++++++
 migration/migration.h    |  1 +
 migration/ram.c          |  9 ++++--
 migration/trace-events   |  1 +
 5 files changed, 92 insertions(+), 3 deletions(-)

Comments

Peter Xu Oct. 16, 2024, 6:49 p.m. UTC | #1
On Wed, Oct 16, 2024 at 03:56:44PM +0800, yong.huang@smartx.com wrote:
> From: Hyman Huang <yong.huang@smartx.com>
> 
> When a VM is configured with huge memory, the current throttle logic
> doesn't appear to scale, because migration_trigger_throttle() is only
> called once per iteration, so it won't be invoked for a long time if a
> single iteration takes a long time.
> 
> The periodic dirty sync aims to fix the above issue by synchronizing
> the ramblock from the remote dirty bitmap and, when necessary, triggering
> the CPU throttle multiple times during a long iteration.
> 
> This is a trade-off between synchronization overhead and CPU throttle
> impact.
> 
> Signed-off-by: Hyman Huang <yong.huang@smartx.com>
> ---
>  migration/cpu-throttle.c | 70 +++++++++++++++++++++++++++++++++++++++-
>  migration/cpu-throttle.h | 14 ++++++++
>  migration/migration.h    |  1 +
>  migration/ram.c          |  9 ++++--
>  migration/trace-events   |  1 +
>  5 files changed, 92 insertions(+), 3 deletions(-)
> 
> diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
> index fa47ee2e21..784b51ab35 100644
> --- a/migration/cpu-throttle.c
> +++ b/migration/cpu-throttle.c
> @@ -28,16 +28,23 @@
>  #include "qemu/main-loop.h"
>  #include "sysemu/cpus.h"
>  #include "cpu-throttle.h"
> +#include "migration.h"
> +#include "migration-stats.h"
> +#include "options.h"
>  #include "trace.h"
>  
>  /* vcpu throttling controls */
> -static QEMUTimer *throttle_timer;
> +static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
>  static unsigned int throttle_percentage;
> +static bool throttle_dirty_sync_timer_active;
>  
>  #define CPU_THROTTLE_PCT_MIN 1
>  #define CPU_THROTTLE_PCT_MAX 99
>  #define CPU_THROTTLE_TIMESLICE_NS 10000000
>  
> +/* RAMBlock dirty sync trigger every five seconds */

Maybe enrich it to say "making sure it is synchronized every five seconds"?
It can synchronize faster if each iteration runs faster than 5sec, so the
comment should emphasize that this is a fallback sync, used only with
auto-converge.
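
Something like this, perhaps (exact wording is only a suggestion):

/*
 * Make sure a RAMBlock dirty sync happens at least once every five
 * seconds.  Iterations that finish faster than that sync on their own;
 * this is only a fallback for long-running iterations, and it is only
 * armed when auto-converge is enabled.
 */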

> +#define CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS 5000
> +
>  static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
>  {
>      double pct;
> @@ -112,6 +119,7 @@ void cpu_throttle_set(int new_throttle_pct)
>  void cpu_throttle_stop(void)
>  {
>      qatomic_set(&throttle_percentage, 0);
> +    cpu_throttle_dirty_sync_timer(false);
>  }
>  
>  bool cpu_throttle_active(void)
> @@ -124,8 +132,68 @@ int cpu_throttle_get_percentage(void)
>      return qatomic_read(&throttle_percentage);
>  }
>  
> +void cpu_throttle_dirty_sync_timer_tick(void *opaque)
> +{
> +    static uint64_t prev_sync_cnt = 2;

IIUC the hard-coded "2" isn't needed, as long as the counter is guaranteed
to be updated on each timer call, and you special-cased "1" anyway below.
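
E.g. (untested), a zero-initialized counter should behave the same, since
it is refreshed at the end of every tick anyway:

    static uint64_t prev_sync_cnt;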

> +    uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> +
> +    if (!migrate_auto_converge()) {
> +        /* Stop the timer when auto converge is disabled */
> +        return;

I think we can try to make sure this never starts if !auto-converge, so
that this path never triggers in real life.
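
Then (sketch only, assuming the caller guards the start path) the check
could become an assertion instead of a silent return:

    assert(migrate_auto_converge());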

> +    }
> +
> +    /*
> +     * The first iteration copies all memory anyhow and has no
> +     * effect on guest performance, therefore omit it to avoid
> +     * paying extra for the sync penalty.
> +     */
> +    if (sync_cnt <= 1) {
> +        goto end;
> +    }
> +
> +    if (sync_cnt == prev_sync_cnt) {
> +        trace_cpu_throttle_dirty_sync();
> +        WITH_RCU_READ_LOCK_GUARD() {
> +            migration_bitmap_sync_precopy(false);
> +        }
> +    }
> +
> +end:
> +    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> +
> +    timer_mod(throttle_dirty_sync_timer,
> +        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
> +            CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
> +}
> +
> +static bool cpu_throttle_dirty_sync_active(void)
> +{
> +    return qatomic_read(&throttle_dirty_sync_timer_active);
> +}
> +
> +void cpu_throttle_dirty_sync_timer(bool enable)
> +{
> +    if (enable) {
> +        assert(throttle_dirty_sync_timer);
> +        if (!cpu_throttle_dirty_sync_active()) {

I suppose this can be logically racy?  After this patch, this path can be
invoked from both the main thread and the migration thread.

The simplest way to fix this is to move the cpu_throttle_stop() call under
bql_lock(), so that it is serialized by the BQL.  Then we can add an
assertion for bql_locked() at the entry of the function.
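
Something like this at the entry (untested sketch; bql_locked() is
declared in qemu/main-loop.h, which this file already includes):

void cpu_throttle_dirty_sync_timer(bool enable)
{
    assert(bql_locked());
    ...
}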

> +            timer_mod(throttle_dirty_sync_timer,
> +                qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
> +                    CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
> +            qatomic_set(&throttle_dirty_sync_timer_active, 1);
> +        }
> +    } else {
> +        if (throttle_dirty_sync_timer != NULL) {

IIUC throttle_dirty_sync_timer is never destroyed, i.e. timer_del() only
disables it.  So you should probably check throttle_dirty_sync_timer_active
instead?
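
I.e. something like (untested):

    } else {
        if (cpu_throttle_dirty_sync_active()) {
            timer_del(throttle_dirty_sync_timer);
            qatomic_set(&throttle_dirty_sync_timer_active, 0);
        }
    }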

> +            timer_del(throttle_dirty_sync_timer);
> +            qatomic_set(&throttle_dirty_sync_timer_active, 0);
> +        }
> +    }
> +}
> +
>  void cpu_throttle_init(void)
>  {
>      throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
>                                    cpu_throttle_timer_tick, NULL);
> +    throttle_dirty_sync_timer =
> +        timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
> +                     cpu_throttle_dirty_sync_timer_tick, NULL);
>  }
> diff --git a/migration/cpu-throttle.h b/migration/cpu-throttle.h
> index d65bdef6d0..420702b8d3 100644
> --- a/migration/cpu-throttle.h
> +++ b/migration/cpu-throttle.h
> @@ -65,4 +65,18 @@ bool cpu_throttle_active(void);
>   */
>  int cpu_throttle_get_percentage(void);
>  
> +/**
> + * cpu_throttle_dirty_sync_timer_tick:
> + *
> + * Dirty sync timer hook.
> + */
> +void cpu_throttle_dirty_sync_timer_tick(void *opaque);
> +
> +/**
> + * cpu_throttle_dirty_sync_timer:
> + *
> + * Start or stop the dirty sync timer.
> + */
> +void cpu_throttle_dirty_sync_timer(bool enable);
> +
>  #endif /* SYSEMU_CPU_THROTTLE_H */
> diff --git a/migration/migration.h b/migration/migration.h
> index 38aa1402d5..fbd0d19092 100644
> --- a/migration/migration.h
> +++ b/migration/migration.h
> @@ -537,4 +537,5 @@ int migration_rp_wait(MigrationState *s);
>   */
>  void migration_rp_kick(MigrationState *s);
>  
> +void migration_bitmap_sync_precopy(bool last_stage);
>  #endif
> diff --git a/migration/ram.c b/migration/ram.c
> index 9b5b350405..ac34e731e2 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -1020,6 +1020,11 @@ static void migration_trigger_throttle(RAMState *rs)
>          migration_transferred_bytes() - rs->bytes_xfer_prev;
>      uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
>      uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
> +    bool auto_converge = migrate_auto_converge();
> +
> +    if (auto_converge) {
> +        cpu_throttle_dirty_sync_timer(true);
> +    }

If you have the guard to skip the 1st sync in the timer fn(), IIUC you can
move this earlier, e.g. to migration_thread() before the iteration starts.
Otherwise it won't be as clear when this timer starts if it hides in the
sync path itself.
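
E.g. (rough sketch, untested), in migration_thread() before the main loop
starts:

    if (migrate_auto_converge()) {
        cpu_throttle_dirty_sync_timer(true);
    }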

>  
>      /*
>       * The following detection logic can be refined later. For now:
> @@ -1031,7 +1036,7 @@ static void migration_trigger_throttle(RAMState *rs)
>      if ((bytes_dirty_period > bytes_dirty_threshold) &&
>          (++rs->dirty_rate_high_cnt >= 2)) {
>          rs->dirty_rate_high_cnt = 0;
> -        if (migrate_auto_converge()) {
> +        if (auto_converge) {
>              trace_migration_throttle();
>              mig_throttle_guest_down(bytes_dirty_period,
>                                      bytes_dirty_threshold);
> @@ -1088,7 +1093,7 @@ static void migration_bitmap_sync(RAMState *rs, bool last_stage)
>      }
>  }
>  
> -static void migration_bitmap_sync_precopy(bool last_stage)
> +void migration_bitmap_sync_precopy(bool last_stage)
>  {
>      Error *local_err = NULL;
>      assert(ram_state);
> diff --git a/migration/trace-events b/migration/trace-events
> index 9a19599804..0638183056 100644
> --- a/migration/trace-events
> +++ b/migration/trace-events
> @@ -381,3 +381,4 @@ migration_pagecache_insert(void) "Error allocating page"
>  
>  # cpu-throttle.c
>  cpu_throttle_set(int new_throttle_pct)  "set guest CPU throttled by %d%%"
> +cpu_throttle_dirty_sync(void) ""
> -- 
> 2.27.0
>
Yong Huang Oct. 17, 2024, 3:58 a.m. UTC | #2
On Thu, Oct 17, 2024 at 2:49 AM Peter Xu <peterx@redhat.com> wrote:

> On Wed, Oct 16, 2024 at 03:56:44PM +0800, yong.huang@smartx.com wrote:
> > From: Hyman Huang <yong.huang@smartx.com>
> >
> > When a VM is configured with huge memory, the current throttle logic
> > doesn't appear to scale, because migration_trigger_throttle() is only
> > called once per iteration, so it won't be invoked for a long time if a
> > single iteration takes a long time.
> >
> > The periodic dirty sync aims to fix the above issue by synchronizing
> > the ramblock from the remote dirty bitmap and, when necessary, triggering
> > the CPU throttle multiple times during a long iteration.
> >
> > This is a trade-off between synchronization overhead and CPU throttle
> > impact.
> >
> > Signed-off-by: Hyman Huang <yong.huang@smartx.com>
> > ---
> >  migration/cpu-throttle.c | 70 +++++++++++++++++++++++++++++++++++++++-
> >  migration/cpu-throttle.h | 14 ++++++++
> >  migration/migration.h    |  1 +
> >  migration/ram.c          |  9 ++++--
> >  migration/trace-events   |  1 +
> >  5 files changed, 92 insertions(+), 3 deletions(-)
> >
> > diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
> > index fa47ee2e21..784b51ab35 100644
> > --- a/migration/cpu-throttle.c
> > +++ b/migration/cpu-throttle.c
> > @@ -28,16 +28,23 @@
> >  #include "qemu/main-loop.h"
> >  #include "sysemu/cpus.h"
> >  #include "cpu-throttle.h"
> > +#include "migration.h"
> > +#include "migration-stats.h"
> > +#include "options.h"
> >  #include "trace.h"
> >
> >  /* vcpu throttling controls */
> > -static QEMUTimer *throttle_timer;
> > +static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
> >  static unsigned int throttle_percentage;
> > +static bool throttle_dirty_sync_timer_active;
> >
> >  #define CPU_THROTTLE_PCT_MIN 1
> >  #define CPU_THROTTLE_PCT_MAX 99
> >  #define CPU_THROTTLE_TIMESLICE_NS 10000000
> >
> > +/* RAMBlock dirty sync trigger every five seconds */
>
> Maybe enrich it to say "making sure it is synchronized every five seconds"?
> It can synchronize faster if each iteration runs faster than 5sec, so the
> comment should emphasize that this is a fallback sync, used only with
> auto-converge.
>

Agreed, I'll refine the comment in the next version.


>
> > +#define CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS 5000
> > +
> >  static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
> >  {
> >      double pct;
> > @@ -112,6 +119,7 @@ void cpu_throttle_set(int new_throttle_pct)
> >  void cpu_throttle_stop(void)
> >  {
> >      qatomic_set(&throttle_percentage, 0);
> > +    cpu_throttle_dirty_sync_timer(false);
> >  }
> >
> >  bool cpu_throttle_active(void)
> > @@ -124,8 +132,68 @@ int cpu_throttle_get_percentage(void)
> >      return qatomic_read(&throttle_percentage);
> >  }
> >
> > +void cpu_throttle_dirty_sync_timer_tick(void *opaque)
> > +{
> > +    static uint64_t prev_sync_cnt = 2;
>
> IIUC the hard-coded "2" isn't needed, as long as the counter is guaranteed
> to be updated on each timer call, and you special-cased "1" anyway below.
>

Ok.


>
> > +    uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> > +
> > +    if (!migrate_auto_converge()) {
> > +        /* Stop the timer when auto converge is disabled */
> > +        return;
>
> I think we can try to make sure this never starts if !auto-converge, so
> that this path never triggers in real life.
>

Indeed, this makes the code cleaner.


> > +    }
> > +
> > +    /*
> > +     * The first iteration copies all memory anyhow and has no
> > +     * effect on guest performance, therefore omit it to avoid
> > +     * paying extra for the sync penalty.
> > +     */
> > +    if (sync_cnt <= 1) {
> > +        goto end;
> > +    }
> > +
> > +    if (sync_cnt == prev_sync_cnt) {
> > +        trace_cpu_throttle_dirty_sync();
> > +        WITH_RCU_READ_LOCK_GUARD() {
> > +            migration_bitmap_sync_precopy(false);
> > +        }
> > +    }
> > +
> > +end:
> > +    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
> > +
> > +    timer_mod(throttle_dirty_sync_timer,
> > +        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
> > +            CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
> > +}
> > +
> > +static bool cpu_throttle_dirty_sync_active(void)
> > +{
> > +    return qatomic_read(&throttle_dirty_sync_timer_active);
> > +}
> > +
> > +void cpu_throttle_dirty_sync_timer(bool enable)
> > +{
> > +    if (enable) {
> > +        assert(throttle_dirty_sync_timer);
> > +        if (!cpu_throttle_dirty_sync_active()) {
>
> I suppose this can be logically racy?  After this patch, this path can be
> invoked from both the main thread and the migration thread.
>

Indeed, thanks for pointing this out.


>
> The simplest way to fix this is to move the cpu_throttle_stop() call under
> bql_lock(), so that it is serialized by the BQL.  Then we can add an
> assertion for bql_locked() at the entry of the function.


> > +            timer_mod(throttle_dirty_sync_timer,
> > +                qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
> > +                    CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
> > +            qatomic_set(&throttle_dirty_sync_timer_active, 1);
> > +        }
> > +    } else {
> > +        if (throttle_dirty_sync_timer != NULL) {
>
> IIUC throttle_dirty_sync_timer is never destroyed, i.e. timer_del() only
> disables it.  So you should probably check throttle_dirty_sync_timer_active
> instead?
>
> > +            timer_del(throttle_dirty_sync_timer);
> > +            qatomic_set(&throttle_dirty_sync_timer_active, 0);
> > +        }
> > +    }
> > +}
> > +
> >  void cpu_throttle_init(void)
> >  {
> >      throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
> >                                    cpu_throttle_timer_tick, NULL);
> > +    throttle_dirty_sync_timer =
> > +        timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
> > +                     cpu_throttle_dirty_sync_timer_tick, NULL);
> >  }
> > diff --git a/migration/cpu-throttle.h b/migration/cpu-throttle.h
> > index d65bdef6d0..420702b8d3 100644
> > --- a/migration/cpu-throttle.h
> > +++ b/migration/cpu-throttle.h
> > @@ -65,4 +65,18 @@ bool cpu_throttle_active(void);
> >   */
> >  int cpu_throttle_get_percentage(void);
> >
> > +/**
> > + * cpu_throttle_dirty_sync_timer_tick:
> > + *
> > + * Dirty sync timer hook.
> > + */
> > +void cpu_throttle_dirty_sync_timer_tick(void *opaque);
> > +
> > +/**
> > + * cpu_throttle_dirty_sync_timer:
> > + *
> > + * Start or stop the dirty sync timer.
> > + */
> > +void cpu_throttle_dirty_sync_timer(bool enable);
> > +
> >  #endif /* SYSEMU_CPU_THROTTLE_H */
> > diff --git a/migration/migration.h b/migration/migration.h
> > index 38aa1402d5..fbd0d19092 100644
> > --- a/migration/migration.h
> > +++ b/migration/migration.h
> > @@ -537,4 +537,5 @@ int migration_rp_wait(MigrationState *s);
> >   */
> >  void migration_rp_kick(MigrationState *s);
> >
> > +void migration_bitmap_sync_precopy(bool last_stage);
> >  #endif
> > diff --git a/migration/ram.c b/migration/ram.c
> > index 9b5b350405..ac34e731e2 100644
> > --- a/migration/ram.c
> > +++ b/migration/ram.c
> > @@ -1020,6 +1020,11 @@ static void migration_trigger_throttle(RAMState *rs)
> >          migration_transferred_bytes() - rs->bytes_xfer_prev;
> >      uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
> >      uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
> > +    bool auto_converge = migrate_auto_converge();
> > +
> > +    if (auto_converge) {
> > +        cpu_throttle_dirty_sync_timer(true);
> > +    }
>
> If you have the guard to skip the 1st sync in the timer fn(), IIUC you can
> move this earlier, e.g. to migration_thread() before the iteration starts.
> Otherwise it won't be as clear when this timer starts if it hides in the
> sync path itself.
>
> >
> >      /*
> >       * The following detection logic can be refined later. For now:
> > @@ -1031,7 +1036,7 @@ static void migration_trigger_throttle(RAMState *rs)
> >      if ((bytes_dirty_period > bytes_dirty_threshold) &&
> >          (++rs->dirty_rate_high_cnt >= 2)) {
> >          rs->dirty_rate_high_cnt = 0;
> > -        if (migrate_auto_converge()) {
> > +        if (auto_converge) {
> >              trace_migration_throttle();
> >              mig_throttle_guest_down(bytes_dirty_period,
> >                                      bytes_dirty_threshold);
> > @@ -1088,7 +1093,7 @@ static void migration_bitmap_sync(RAMState *rs, bool last_stage)
> >      }
> >  }
> >
> > -static void migration_bitmap_sync_precopy(bool last_stage)
> > +void migration_bitmap_sync_precopy(bool last_stage)
> >  {
> >      Error *local_err = NULL;
> >      assert(ram_state);
> > diff --git a/migration/trace-events b/migration/trace-events
> > index 9a19599804..0638183056 100644
> > --- a/migration/trace-events
> > +++ b/migration/trace-events
> > @@ -381,3 +381,4 @@ migration_pagecache_insert(void) "Error allocating page"
> >
> >  # cpu-throttle.c
> >  cpu_throttle_set(int new_throttle_pct)  "set guest CPU throttled by %d%%"
> > +cpu_throttle_dirty_sync(void) ""
> > --
> > 2.27.0
> >
>
> --
> Peter Xu
>
>
All the comments above make sense, thanks Peter; I'll address them in the
next version.

Yong

Patch

diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
index fa47ee2e21..784b51ab35 100644
--- a/migration/cpu-throttle.c
+++ b/migration/cpu-throttle.c
@@ -28,16 +28,23 @@ 
 #include "qemu/main-loop.h"
 #include "sysemu/cpus.h"
 #include "cpu-throttle.h"
+#include "migration.h"
+#include "migration-stats.h"
+#include "options.h"
 #include "trace.h"
 
 /* vcpu throttling controls */
-static QEMUTimer *throttle_timer;
+static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
 static unsigned int throttle_percentage;
+static bool throttle_dirty_sync_timer_active;
 
 #define CPU_THROTTLE_PCT_MIN 1
 #define CPU_THROTTLE_PCT_MAX 99
 #define CPU_THROTTLE_TIMESLICE_NS 10000000
 
+/* RAMBlock dirty sync trigger every five seconds */
+#define CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS 5000
+
 static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
 {
     double pct;
@@ -112,6 +119,7 @@  void cpu_throttle_set(int new_throttle_pct)
 void cpu_throttle_stop(void)
 {
     qatomic_set(&throttle_percentage, 0);
+    cpu_throttle_dirty_sync_timer(false);
 }
 
 bool cpu_throttle_active(void)
@@ -124,8 +132,68 @@  int cpu_throttle_get_percentage(void)
     return qatomic_read(&throttle_percentage);
 }
 
+void cpu_throttle_dirty_sync_timer_tick(void *opaque)
+{
+    static uint64_t prev_sync_cnt = 2;
+    uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
+
+    if (!migrate_auto_converge()) {
+        /* Stop the timer when auto converge is disabled */
+        return;
+    }
+
+    /*
+     * The first iteration copies all memory anyhow and has no
+     * effect on guest performance, therefore omit it to avoid
+     * paying extra for the sync penalty.
+     */
+    if (sync_cnt <= 1) {
+        goto end;
+    }
+
+    if (sync_cnt == prev_sync_cnt) {
+        trace_cpu_throttle_dirty_sync();
+        WITH_RCU_READ_LOCK_GUARD() {
+            migration_bitmap_sync_precopy(false);
+        }
+    }
+
+end:
+    prev_sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
+
+    timer_mod(throttle_dirty_sync_timer,
+        qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
+            CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
+}
+
+static bool cpu_throttle_dirty_sync_active(void)
+{
+    return qatomic_read(&throttle_dirty_sync_timer_active);
+}
+
+void cpu_throttle_dirty_sync_timer(bool enable)
+{
+    if (enable) {
+        assert(throttle_dirty_sync_timer);
+        if (!cpu_throttle_dirty_sync_active()) {
+            timer_mod(throttle_dirty_sync_timer,
+                qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
+                    CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
+            qatomic_set(&throttle_dirty_sync_timer_active, 1);
+        }
+    } else {
+        if (throttle_dirty_sync_timer != NULL) {
+            timer_del(throttle_dirty_sync_timer);
+            qatomic_set(&throttle_dirty_sync_timer_active, 0);
+        }
+    }
+}
+
 void cpu_throttle_init(void)
 {
     throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                   cpu_throttle_timer_tick, NULL);
+    throttle_dirty_sync_timer =
+        timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
+                     cpu_throttle_dirty_sync_timer_tick, NULL);
 }
diff --git a/migration/cpu-throttle.h b/migration/cpu-throttle.h
index d65bdef6d0..420702b8d3 100644
--- a/migration/cpu-throttle.h
+++ b/migration/cpu-throttle.h
@@ -65,4 +65,18 @@  bool cpu_throttle_active(void);
  */
 int cpu_throttle_get_percentage(void);
 
+/**
+ * cpu_throttle_dirty_sync_timer_tick:
+ *
+ * Dirty sync timer hook.
+ */
+void cpu_throttle_dirty_sync_timer_tick(void *opaque);
+
+/**
+ * cpu_throttle_dirty_sync_timer:
+ *
+ * Start or stop the dirty sync timer.
+ */
+void cpu_throttle_dirty_sync_timer(bool enable);
+
 #endif /* SYSEMU_CPU_THROTTLE_H */
diff --git a/migration/migration.h b/migration/migration.h
index 38aa1402d5..fbd0d19092 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -537,4 +537,5 @@  int migration_rp_wait(MigrationState *s);
  */
 void migration_rp_kick(MigrationState *s);
 
+void migration_bitmap_sync_precopy(bool last_stage);
 #endif
diff --git a/migration/ram.c b/migration/ram.c
index 9b5b350405..ac34e731e2 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1020,6 +1020,11 @@  static void migration_trigger_throttle(RAMState *rs)
         migration_transferred_bytes() - rs->bytes_xfer_prev;
     uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
     uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
+    bool auto_converge = migrate_auto_converge();
+
+    if (auto_converge) {
+        cpu_throttle_dirty_sync_timer(true);
+    }
 
     /*
      * The following detection logic can be refined later. For now:
@@ -1031,7 +1036,7 @@  static void migration_trigger_throttle(RAMState *rs)
     if ((bytes_dirty_period > bytes_dirty_threshold) &&
         (++rs->dirty_rate_high_cnt >= 2)) {
         rs->dirty_rate_high_cnt = 0;
-        if (migrate_auto_converge()) {
+        if (auto_converge) {
             trace_migration_throttle();
             mig_throttle_guest_down(bytes_dirty_period,
                                     bytes_dirty_threshold);
@@ -1088,7 +1093,7 @@  static void migration_bitmap_sync(RAMState *rs, bool last_stage)
     }
 }
 
-static void migration_bitmap_sync_precopy(bool last_stage)
+void migration_bitmap_sync_precopy(bool last_stage)
 {
     Error *local_err = NULL;
     assert(ram_state);
diff --git a/migration/trace-events b/migration/trace-events
index 9a19599804..0638183056 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -381,3 +381,4 @@  migration_pagecache_insert(void) "Error allocating page"
 
 # cpu-throttle.c
 cpu_throttle_set(int new_throttle_pct)  "set guest CPU throttled by %d%%"
+cpu_throttle_dirty_sync(void) ""