@@ -146,31 +146,106 @@ static void rtc_coalesced_timer(void *opaque)
}
#endif
-/* handle periodic timer */
-static void periodic_timer_update(RTCState *s, int64_t current_time)
+static uint32_t rtc_periodic_clock_ticks(RTCState *s)
{
- int period_code, period;
- int64_t cur_clock, next_irq_clock;
+ int period_code;
+
+ if (!(s->cmos_data[RTC_REG_B] & REG_B_PIE)) {
+ return 0;
+ }
period_code = s->cmos_data[RTC_REG_A] & 0x0f;
- if (period_code != 0
- && (s->cmos_data[RTC_REG_B] & REG_B_PIE)) {
- if (period_code <= 2)
- period_code += 7;
- /* period in 32 Khz cycles */
- period = 1 << (period_code - 1);
-#ifdef TARGET_I386
- if (period != s->period) {
- s->irq_coalesced = (s->irq_coalesced * s->period) / period;
- DPRINTF_C("cmos: coalesced irqs scaled to %d\n", s->irq_coalesced);
- }
- s->period = period;
-#endif
+ if (!period_code) {
+ return 0;
+ }
+
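+    /*
+     * rate-select codes 1 and 2 give the same periods as codes 8 and 9,
+     * hence the adjustment below.
+     */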
+ if (period_code <= 2) {
+ period_code += 7;
+ }
+
+    /* period in 32 kHz cycles */
+ return 1 << (period_code - 1);
+}
+
+/*
+ * handle periodic timer. A non-zero @old_period gives the previous period
+ * in 32 kHz cycles and indicates that this update is only due to a period
+ * adjustment.
+ */
+static void
+periodic_timer_update(RTCState *s, int64_t current_time, uint32_t old_period)
+{
+ uint32_t period;
+ int64_t cur_clock, next_irq_clock, lost_clock = 0;
+
+ period = rtc_periodic_clock_ticks(s);
+
+ if (period) {
/* compute 32 khz clock */
cur_clock =
muldiv64(current_time, RTC_CLOCK_RATE, NANOSECONDS_PER_SECOND);
- next_irq_clock = (cur_clock & ~(period - 1)) + period;
+ /*
+ * if the periodic timer's update is due to period re-configuration,
+         * we should count the clock ticks since the last interrupt.
+ */
+ if (old_period) {
+ int64_t last_periodic_clock, next_periodic_clock;
+
+ next_periodic_clock = muldiv64(s->next_periodic_time,
+ RTC_CLOCK_RATE, NANOSECONDS_PER_SECOND);
+ last_periodic_clock = next_periodic_clock - old_period;
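+            /* 32 kHz ticks elapsed since the last periodic interrupt */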
+ lost_clock = cur_clock - last_periodic_clock;
+ assert(lost_clock >= 0);
+ }
+
+#ifdef TARGET_I386
+ /*
+ * recalculate the coalesced irqs for two reasons:
+         * a) the lost_clock is more than a period, i.e. the timer
+         *    interrupt has been lost, so we should catch up the time.
+         *
+         * b) the period may have been reconfigured. In this case, when
+ * switching from a shorter to a longer period, scale down
+ * the missing ticks since we expect the OS handler to
+ * treat the delayed ticks as longer. Any leftovers are
+ * put back into lost_clock.
+ * When switching to a shorter period, scale up the missing
+ * ticks since we expect the OS handler to treat the delayed
+ * ticks as shorter.
+ */
+ if (s->lost_tick_policy == LOST_TICK_POLICY_SLEW) {
+ uint32_t old_irq_coalesced = s->irq_coalesced;
+
+ /*
+             * old QEMUs only used s->period when LOST_TICK_POLICY_SLEW
+             * was in use; keep following that rule so that migration
+             * from them stays compatible.
+ */
+ s->period = period;
+
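+            /*
+             * fold the previously coalesced irqs back into clock ticks,
+             * then re-divide by the new period; the remainder stays in
+             * lost_clock.
+             */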
+ lost_clock += old_irq_coalesced * old_period;
+ s->irq_coalesced = lost_clock / s->period;
+ lost_clock %= s->period;
+ if (old_irq_coalesced != s->irq_coalesced ||
+ old_period != s->period) {
+ DPRINTF_C("cmos: coalesced irqs scaled from %d to %d, "
+ "period scaled from %d to %d\n", old_irq_coalesced,
+ s->irq_coalesced, old_period, s->period);
+ rtc_coalesced_timer_update(s);
+ }
+ } else
+#endif
+ {
+ /*
+             * there is no way to compensate for the lost interrupts if
+             * LOST_TICK_POLICY_SLEW is not used, so just let the time
+             * progress anyway.
+ */
+ lost_clock = MIN(lost_clock, period);
+ }
+
+ assert(lost_clock >= 0 && lost_clock <= period);
+
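+        /*
+         * fire the next interrupt one period from now, less the ticks
+         * already accounted for in lost_clock.
+         */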
+ next_irq_clock = cur_clock + period - lost_clock;
s->next_periodic_time = muldiv64(next_irq_clock, NANOSECONDS_PER_SECOND,
RTC_CLOCK_RATE) + 1;
timer_mod(s->periodic_timer, s->next_periodic_time);
@@ -186,7 +261,7 @@ static void rtc_periodic_timer(void *opaque)
{
RTCState *s = opaque;
- periodic_timer_update(s, s->next_periodic_time);
+ periodic_timer_update(s, s->next_periodic_time, 0);
s->cmos_data[RTC_REG_C] |= REG_C_PF;
if (s->cmos_data[RTC_REG_B] & REG_B_PIE) {
s->cmos_data[RTC_REG_C] |= REG_C_IRQF;
@@ -391,6 +466,7 @@ static void cmos_ioport_write(void *opaque, hwaddr addr,
uint64_t data, unsigned size)
{
RTCState *s = opaque;
+ uint32_t old_period;
bool update_periodic_timer;
if ((addr & 1) == 0) {
@@ -425,6 +501,7 @@ static void cmos_ioport_write(void *opaque, hwaddr addr,
break;
case RTC_REG_A:
update_periodic_timer = (s->cmos_data[RTC_REG_A] ^ data) & 0x0f;
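+        /* snapshot the period in effect before REG_A is updated */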
+ old_period = rtc_periodic_clock_ticks(s);
if ((data & 0x60) == 0x60) {
if (rtc_running(s)) {
@@ -450,7 +527,8 @@ static void cmos_ioport_write(void *opaque, hwaddr addr,
(s->cmos_data[RTC_REG_A] & REG_A_UIP);
if (update_periodic_timer) {
- periodic_timer_update(s, qemu_clock_get_ns(rtc_clock));
+ periodic_timer_update(s, qemu_clock_get_ns(rtc_clock),
+ old_period);
}
check_update_timer(s);
@@ -458,6 +536,7 @@ static void cmos_ioport_write(void *opaque, hwaddr addr,
case RTC_REG_B:
update_periodic_timer = (s->cmos_data[RTC_REG_B] ^ data)
& REG_B_PIE;
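+        /* snapshot the period in effect before REG_B is updated */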
+ old_period = rtc_periodic_clock_ticks(s);
if (data & REG_B_SET) {
/* update cmos to when the rtc was stopping */
@@ -487,7 +566,8 @@ static void cmos_ioport_write(void *opaque, hwaddr addr,
s->cmos_data[RTC_REG_B] = data;
if (update_periodic_timer) {
- periodic_timer_update(s, qemu_clock_get_ns(rtc_clock));
+ periodic_timer_update(s, qemu_clock_get_ns(rtc_clock),
+ old_period);
}
check_update_timer(s);
@@ -757,7 +837,7 @@ static int rtc_post_load(void *opaque, int version_id)
uint64_t now = qemu_clock_get_ns(rtc_clock);
if (now < s->next_periodic_time ||
now > (s->next_periodic_time + get_max_clock_jump())) {
- periodic_timer_update(s, qemu_clock_get_ns(rtc_clock));
+ periodic_timer_update(s, qemu_clock_get_ns(rtc_clock), 0);
}
}
@@ -822,7 +902,7 @@ static void rtc_notify_clock_reset(Notifier *notifier, void *data)
int64_t now = *(int64_t *)data;
rtc_set_date_from_host(ISA_DEVICE(s));
- periodic_timer_update(s, now);
+ periodic_timer_update(s, now, 0);
check_update_timer(s);
#ifdef TARGET_I386
if (s->lost_tick_policy == LOST_TICK_POLICY_SLEW) {