@@ -910,6 +910,8 @@ static void __update_vcpu_system_time(struct vcpu *v, int force)
     _u.tsc_timestamp = tsc_stamp;
     _u.system_time = t->stime_local_stamp;
+    if ( clocksource_is_tsc )
+        _u.flags |= PVCLOCK_TSC_STABLE_BIT;
     if ( is_hvm_domain(d) )
         _u.tsc_timestamp += v->arch.hvm_vcpu.cache_tsc_offset;
@@ -1370,9 +1372,12 @@ static void time_calibration_std_rendezvous(void *_r)
     if ( smp_processor_id() == 0 )
     {
+        u64 last_counter;
         while ( atomic_read(&r->semaphore) != (total_cpus - 1) )
             cpu_relax();
-        r->master_stime = read_platform_stime();
+        r->master_stime = read_platform_stime(&last_counter);
+        if ( clocksource_is_tsc )
+            r->master_tsc_stamp = last_counter;
         mb(); /* write r->master_stime /then/ signal */
         atomic_inc(&r->semaphore);
     }
@@ -1384,7 +1389,10 @@ static void time_calibration_std_rendezvous(void *_r)
         mb(); /* receive signal /then/ read r->master_stime */
     }
-    c->local_tsc_stamp = rdtsc();
+    if ( clocksource_is_tsc )
+        c->local_tsc_stamp = r->master_tsc_stamp;
+    else
+        c->local_tsc_stamp = rdtsc();
     c->stime_local_stamp = get_s_time();
     c->stime_master_stamp = r->master_stime;
When using TSC as the clocksource we rely solely on the TSC for updating the vcpu time infos (pvti). Right now, each vCPU takes its tsc_timestamp at a different instant, i.e. EPOCH + delta. This delta varies depending on when the CPU calibrates with CPU 0 (master), and will likely differ and vary across vCPUs. This means each vCPU's pvti does not account for its calibration error, which can lead to time going backwards: a time read on vCPU B immediately after one on vCPU A can be smaller. While this does not happen often, I was able to observe (with clocksource=tsc) around 50 warps of < 100 ns in an hour.

This patch proposes relying on host TSC synchronization and passthrough of the master TSC to the guest when running on a TSC-safe platform. In the rendezvous function we retrieve the platform time in ns together with the last counter read by the clocksource that was used to compute system time. The master writes both master_tsc_stamp and master_stime, and the other vCPUs (slaves) use them to update their corresponding time infos. This way we can guarantee that, on a platform with a constant and reliable TSC, the time read on vCPU B right after vCPU A is larger, independently of the vCPU calibration error.

Since the pvclock time infos are then monotonic as seen by any vCPU, we set the PVCLOCK_TSC_STABLE_BIT flag, which in turn enables use of the vDSO on Linux. IIUC, this is similar to how it is implemented on KVM.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 xen/arch/x86/time.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
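For context, below is a minimal sketch of how a guest consumes these pvti fields. The structure layout and PVCLOCK_TSC_STABLE_BIT follow the shared pvclock ABI (same layout as Linux's pvclock_vcpu_time_info); the helper names scale_delta(), rdtsc() and pvclock_read() are illustrative only, and memory barriers are omitted for brevity. It is not the actual guest code, just a sketch of the arithmetic the patch keys on.

#include <stdint.h>

#define PVCLOCK_TSC_STABLE_BIT (1 << 0)

struct pvclock_vcpu_time_info {
    uint32_t version;
    uint32_t pad0;
    uint64_t tsc_timestamp;
    uint64_t system_time;
    uint32_t tsc_to_system_mul;
    int8_t   tsc_shift;
    uint8_t  flags;
    uint8_t  pad[2];
};

static inline uint64_t rdtsc(void)
{
    uint32_t lo, hi;
    asm volatile ( "rdtsc" : "=a" (lo), "=d" (hi) );
    return ((uint64_t)hi << 32) | lo;
}

/* Convert a TSC delta to nanoseconds using the per-vCPU scale/shift. */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int8_t shift)
{
    if ( shift < 0 )
        delta >>= -shift;
    else
        delta <<= shift;
    return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

/* System time = epoch (system_time) plus the scaled delta since tsc_timestamp. */
static uint64_t pvclock_read(const volatile struct pvclock_vcpu_time_info *p)
{
    uint32_t version;
    uint64_t ns;

    do {
        version = p->version;   /* odd => hypervisor update in flight */
        ns = p->system_time +
             scale_delta(rdtsc() - p->tsc_timestamp,
                         p->tsc_to_system_mul, p->tsc_shift);
    } while ( (version & 1) || version != p->version );

    /*
     * With PVCLOCK_TSC_STABLE_BIT set in p->flags, this value can be treated
     * as monotonic across vCPUs without further cross-CPU fixups.
     */
    return ns;
}

The warp described above falls out of this formula: if two vCPUs carry (tsc_timestamp, system_time) pairs taken at slightly different instants, their epochs embed different calibration errors, so back-to-back reads on different vCPUs can go backwards by up to that error. Stamping every vCPU with the same master_tsc_stamp/master_stime pair removes that per-vCPU offset, which is what justifies advertising the stable bit.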