@@ -2167,11 +2167,19 @@ This group incorporates the crash control MSRs. These enlightenments
allow Windows to write crash information such that it can be logged
by Xen.
+=item B<stimer>
+
+This group incorporates the SynIC and synthetic timer MSRs. Windows
+will use synthetic timers in preference to the emulated HPET as a
+source of ticks, so enabling this group ensures that ticks are
+consistent with the use of an enlightened time source
+(B<time_ref_count> or B<reference_tsc>).
+
=item B<defaults>
This is a special value that enables the default set of groups, which
-is currently the B<base>, B<freq>, B<time_ref_count>, B<apic_assist>
-and B<crash_ctl> groups.
+is currently the B<base>, B<freq>, B<time_ref_count>, B<apic_assist>,
+B<crash_ctl> and B<stimer> groups.
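+
+So, for example, assuming the usual B<!> prefix syntax for disabling a
+group, a configuration wanting only the previous default set could
+specify:
+
+    viridian = [ "defaults", "!stimer" ]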
=item B<all>
@@ -324,6 +324,12 @@
*/
#define LIBXL_HAVE_VIRIDIAN_SYNIC 1
+/*
+ * LIBXL_HAVE_VIRIDIAN_STIMER indicates that the 'stimer' value
+ * is present in the viridian enlightenment enumeration.
+ */
+#define LIBXL_HAVE_VIRIDIAN_STIMER 1
+
/*
* LIBXL_HAVE_BUILDINFO_HVM_ACPI_LAPTOP_SLATE indicates that
* libxl_domain_build_info has the u.hvm.acpi_laptop_slate field.
@@ -269,6 +269,7 @@ static int hvm_set_viridian_features(libxl__gc *gc, uint32_t domid,
libxl_bitmap_set(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_TIME_REF_COUNT);
libxl_bitmap_set(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_APIC_ASSIST);
libxl_bitmap_set(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_CRASH_CTL);
+ libxl_bitmap_set(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_STIMER);
}
libxl_for_each_set_bit(v, info->u.hvm.viridian_enable) {
@@ -320,6 +321,9 @@ static int hvm_set_viridian_features(libxl__gc *gc, uint32_t domid,
if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_SYNIC))
mask |= HVMPV_synic;
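+    /*
+     * Enabling 'stimer' also implies 'time_ref_count' and 'synic',
+     * since synthetic timers depend on both an enlightened time
+     * source and the SynIC.
+     */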
+ if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_STIMER))
+ mask |= HVMPV_time_ref_count | HVMPV_synic | HVMPV_stimer;
+
if (mask != 0 &&
xc_hvm_param_set(CTX->xch,
domid,
@@ -236,6 +236,7 @@ libxl_viridian_enlightenment = Enumeration("viridian_enlightenment", [
(5, "apic_assist"),
(6, "crash_ctl"),
(7, "synic"),
+ (8, "stimer"),
])
libxl_hdtype = Enumeration("hdtype", [
@@ -74,6 +74,11 @@
int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val);
int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val);
+bool viridian_synic_deliver_timer_msg(struct vcpu *v, unsigned int sintx,
+ unsigned int index,
+ uint64_t expiration,
+ uint64_t delivery);
+
int viridian_synic_vcpu_init(const struct vcpu *v);
int viridian_synic_domain_init(const struct domain *d);
@@ -93,7 +98,9 @@ void viridian_synic_load_domain_ctxt(
int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val);
int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val);
-int viridian_time_vcpu_init(const struct vcpu *v);
+void viridian_time_poll_timers(struct vcpu *v);
+
+int viridian_time_vcpu_init(struct vcpu *v);
int viridian_time_domain_init(const struct domain *d);
void viridian_time_vcpu_deinit(const struct vcpu *v);
@@ -346,9 +346,60 @@ void viridian_synic_domain_deinit(const struct domain *d)
{
}
-void viridian_synic_poll(const struct vcpu *v)
+void viridian_synic_poll(struct vcpu *v)
{
- /* There are currently no message sources */
+ viridian_time_poll_timers(v);
+}
+
+bool viridian_synic_deliver_timer_msg(struct vcpu *v, unsigned int sintx,
+ unsigned int index,
+ uint64_t expiration,
+ uint64_t delivery)
+{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ const union viridian_sint_msr *vs = &vv->sint[sintx];
+ HV_MESSAGE *msg = vv->simp.ptr;
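+    /* The layout matches HV_TIMER_MESSAGE_PAYLOAD defined in the TLFS */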
+ struct {
+ uint32_t TimerIndex;
+ uint32_t Reserved;
+ uint64_t ExpirationTime;
+ uint64_t DeliveryTime;
+ } payload = {
+ .TimerIndex = index,
+ .ExpirationTime = expiration,
+ .DeliveryTime = delivery,
+ };
+
+ if ( test_bit(sintx, &vv->msg_pending) )
+ return false;
+
+ /*
+     * To avoid using an atomic test-and-set, and a barrier before
+     * calling vlapic_set_irq(), this function must be called in the
+     * context of the vcpu receiving the message.
+ */
+ ASSERT(v == current);
+
+ msg += sintx;
+
+ if ( msg->Header.MessageType != HvMessageTypeNone )
+ {
+ msg->Header.MessageFlags.MessagePending = 1;
+ __set_bit(sintx, &vv->msg_pending);
+ return false;
+ }
+
+ msg->Header.MessageType = HvMessageTimerExpired;
+ msg->Header.MessageFlags.MessagePending = 0;
+ msg->Header.PayloadSize = sizeof(payload);
+
+ BUILD_BUG_ON(sizeof(payload) > sizeof(msg->Payload));
+ memcpy(msg->Payload, &payload, sizeof(payload));
+
+ if ( !vs->mask )
+ vlapic_set_irq(vcpu_vlapic(v), vs->vector, 0);
+
+ return true;
}
bool viridian_synic_is_auto_eoi_sint(const struct vcpu *v,
@@ -12,6 +12,7 @@
#include <xen/version.h>
#include <asm/apic.h>
+#include <asm/event.h>
#include <asm/hvm/support.h>
#include "private.h"
@@ -27,8 +28,10 @@ typedef struct _HV_REFERENCE_TSC_PAGE
static void update_reference_tsc(struct domain *d, bool initialize)
{
- const struct viridian_page *rt = &d->arch.hvm.viridian->reference_tsc;
+ struct viridian_domain *vd = d->arch.hvm.viridian;
+ const struct viridian_page *rt = &vd->reference_tsc;
HV_REFERENCE_TSC_PAGE *p = rt->ptr;
+ uint32_t seq;
if ( initialize )
clear_page(p);
@@ -59,6 +62,8 @@ static void update_reference_tsc(struct domain *d, bool initialize)
printk(XENLOG_G_INFO "d%d: VIRIDIAN REFERENCE_TSC: invalidated\n",
d->domain_id);
+
+ vd->reference_tsc_valid = false;
return;
}
@@ -72,11 +77,14 @@ static void update_reference_tsc(struct domain *d, bool initialize)
* ticks per 100ns shifted left by 64.
*/
p->TscScale = ((10000ul << 32) / d->arch.tsc_khz) << 32;
+ smp_wmb();
+
+ seq = p->TscSequence + 1;
+ if ( seq == 0xFFFFFFFF || seq == 0 ) /* Avoid both 'invalid' values */
+ seq = 1;
- p->TscSequence++;
- if ( p->TscSequence == 0xFFFFFFFF ||
- p->TscSequence == 0 ) /* Avoid both 'invalid' values */
- p->TscSequence = 1;
+ p->TscSequence = seq;
+ vd->reference_tsc_valid = true;
}
static int64_t raw_trc_val(const struct domain *d)
@@ -118,18 +126,253 @@ static int64_t time_ref_count(const struct domain *d)
return raw_trc_val(d) + trc->off;
}
+/*
+ * The specification says: "The partition reference time is computed
+ * by the following formula:
+ *
+ * ReferenceTime = ((VirtualTsc * TscScale) >> 64) + TscOffset
+ *
+ * The multiplication is a 64 bit multiplication, which results in a
+ * 128 bit number which is then shifted 64 times to the right to obtain
+ * the high 64 bits."
+ */
+static uint64_t scale_tsc(uint64_t tsc, uint64_t scale, uint64_t offset)
+{
+ uint64_t result;
+
+ /*
+ * Quadword MUL takes an implicit operand in RAX, and puts the result
+ * in RDX:RAX. Because we only want the result of the multiplication
+ * after shifting right by 64 bits, we therefore only need the content
+ * of RDX.
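+     *
+     * Equivalently, assuming a compiler with __int128 support:
+     *
+     *   (uint64_t)(((unsigned __int128)tsc * scale) >> 64)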
+ */
+ asm ( "mulq %[scale]"
+ : "+a" (tsc), "=d" (result)
+ : [scale] "rm" (scale) );
+
+ return result + offset;
+}
+
+static uint64_t time_now(struct domain *d)
+{
+ uint64_t tsc, scale;
+
+ /*
+     * If the reference TSC page is not enabled, or has been
+     * invalidated, fall back to the partition reference counter.
+ */
+ if ( !d->arch.hvm.viridian->reference_tsc_valid )
+ return time_ref_count(d);
+
+ /* Otherwise compute reference time in the same way the guest would */
+ tsc = hvm_get_guest_tsc(pt_global_vcpu_target(d));
+ scale = ((10000ul << 32) / d->arch.tsc_khz) << 32;
+
+ return scale_tsc(tsc, scale, 0);
+}
+
+static void stop_stimer(struct viridian_stimer *vs)
+{
+ if ( !vs->started )
+ return;
+
+ stop_timer(&vs->timer);
+ vs->started = false;
+}
+
+static void stimer_expire(void *data)
+{
+ struct viridian_stimer *vs = data;
+ struct vcpu *v = vs->v;
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int stimerx = vs - &vv->stimer[0];
+
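+    /*
+     * Message delivery must happen in the context of the target vcpu
+     * (see viridian_synic_deliver_timer_msg()), so just mark the
+     * timer pending here and kick the vcpu to get it polled.
+     */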
+ set_bit(stimerx, &vv->stimer_pending);
+ vcpu_kick(v);
+}
+
+static void start_stimer(struct viridian_stimer *vs)
+{
+ const struct vcpu *v = vs->v;
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int stimerx = vs - &vv->stimer[0];
+ int64_t now = time_now(v->domain);
+ int64_t expiration;
+ s_time_t timeout;
+
+ if ( !test_and_set_bit(stimerx, &vv->stimer_enabled) )
+ printk(XENLOG_G_INFO "%pv: VIRIDIAN STIMER%u: enabled\n", v,
+ stimerx);
+
+ if ( vs->config.periodic )
+ {
+ /*
+ * The specification says that if the timer is lazy then we
+         * skip over any missed expirations, so we can treat this case
+         * the same as if the timer were currently stopped, i.e. we
+ * just schedule expiration to be 'count' ticks from now.
+ */
+ if ( !vs->started || vs->config.lazy )
+ expiration = now + vs->count;
+ else
+ {
+ unsigned int missed = 0;
+
+ /*
+             * The timer is already started, so we're re-scheduling.
+             * Hence advance the expiration time by one period (i.e.
+             * 'count' ticks).
+ */
+ expiration = vs->expiration + vs->count;
+
+ /* Now check to see if any expirations have been missed */
+ if ( expiration - now <= 0 )
+ missed = ((now - expiration) / vs->count) + 1;
+
+ /*
+ * The specification says that if the timer is not lazy then
+ * a non-zero missed count should be used to reduce the period
+             * of the timer until it catches up, unless the missed count
+             * has reached a 'significant number', in which case the
+             * timer should be treated as lazy. Unfortunately the
+             * specification does not state what that number is, so the
+             * choice of number here is a pure guess.
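+             *
+             * E.g. with a count of 100 ticks and 'now' 250 ticks past
+             * the previous expiration, missed == 2 and the period is
+             * temporarily halved until the timer catches up.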
+ */
+ if ( missed > 3 )
+ expiration = now + vs->count;
+ else if ( missed )
+ expiration = now + (vs->count / missed);
+ }
+ }
+ else
+ {
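+        /* For a one-shot timer the count is an absolute expiration time */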
+ expiration = vs->count;
+ if ( expiration - now <= 0 )
+ {
+ vs->expiration = expiration;
+ stimer_expire(vs);
+ return;
+ }
+ }
+ ASSERT(expiration - now > 0);
+
+ vs->expiration = expiration;
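+    /* Timer ticks are in 100ns units, whereas Xen timers are in ns */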
+ timeout = (expiration - now) * 100ull;
+
+ vs->started = true;
+ clear_bit(stimerx, &vv->stimer_pending);
+ migrate_timer(&vs->timer, v->processor);
+ set_timer(&vs->timer, timeout + NOW());
+}
+
+static void poll_stimer(struct vcpu *v, unsigned int stimerx)
+{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ struct viridian_stimer *vs = &vv->stimer[stimerx];
+
+ /*
+     * Timer expiry may race with the timer being disabled. If the
+     * timer is disabled, make sure the pending bit is cleared to
+     * avoid re-polling.
+ */
+ if ( !vs->config.enabled )
+ {
+ clear_bit(stimerx, &vv->stimer_pending);
+ return;
+ }
+
+ if ( !test_bit(stimerx, &vv->stimer_pending) )
+ return;
+
+ if ( !viridian_synic_deliver_timer_msg(v, vs->config.sintx,
+ stimerx, vs->expiration,
+ time_now(v->domain)) )
+ return;
+
+ clear_bit(stimerx, &vv->stimer_pending);
+
+ if ( vs->config.periodic )
+ start_stimer(vs);
+ else
+ vs->config.enabled = 0;
+}
+
+void viridian_time_poll_timers(struct vcpu *v)
+{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ if ( !vv->stimer_pending )
+ return;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ poll_stimer(v, i);
+}
+
+void viridian_time_vcpu_freeze(struct vcpu *v)
+{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ if ( !is_viridian_vcpu(v) ||
+ !(viridian_feature_mask(v->domain) & HVMPV_stimer) )
+ return;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &vv->stimer[i];
+
+ if ( vs->started )
+ stop_timer(&vs->timer);
+ }
+}
+
+void viridian_time_vcpu_thaw(struct vcpu *v)
+{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ if ( !is_viridian_vcpu(v) ||
+ !(viridian_feature_mask(v->domain) & HVMPV_stimer) )
+ return;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &vv->stimer[i];
+
+ if ( vs->config.enabled )
+ start_stimer(vs);
+ }
+}
+
void viridian_time_domain_freeze(const struct domain *d)
{
+ struct vcpu *v;
+
+ if ( !is_viridian_domain(d) )
+ return;
+
+ for_each_vcpu ( d, v )
+ viridian_time_vcpu_freeze(v);
+
time_ref_count_freeze(d);
}
void viridian_time_domain_thaw(const struct domain *d)
{
+ struct vcpu *v;
+
+ if ( !is_viridian_domain(d) )
+ return;
+
time_ref_count_thaw(d);
+
+ for_each_vcpu ( d, v )
+ viridian_time_vcpu_thaw(v);
}
int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
struct domain *d = v->domain;
struct viridian_domain *vd = d->arch.hvm.viridian;
@@ -149,6 +392,61 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
}
break;
+ case HV_X64_MSR_TIME_REF_COUNT:
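+        /* The partition reference counter is read-only */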
+ return X86EMUL_EXCEPTION;
+
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ {
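+        /*
+         * The CONFIG and COUNT MSRs are interleaved, hence the
+         * division by 2 to get the timer index.
+         */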
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_CONFIG) / 2;
+ struct viridian_stimer *vs =
+ &array_access_nospec(vv->stimer, stimerx);
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ stop_stimer(vs);
+
+ vs->config.raw = val;
+
+ if ( !vs->config.sintx )
+ vs->config.enabled = 0;
+
+ if ( vs->config.enabled )
+ start_stimer(vs);
+
+ break;
+ }
+
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ {
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_CONFIG) / 2;
+ struct viridian_stimer *vs =
+ &array_access_nospec(vv->stimer, stimerx);
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ stop_stimer(vs);
+
+ vs->count = val;
+
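+        /*
+         * Writing a zero count disables the timer, whereas writing a
+         * non-zero count enables it if auto_enable is set.
+         */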
+ if ( !vs->count )
+ vs->config.enabled = 0;
+ else if ( vs->config.auto_enable )
+ vs->config.enabled = 1;
+
+ if ( vs->config.enabled )
+ start_stimer(vs);
+
+ break;
+ }
+
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n",
__func__, idx, val);
@@ -160,6 +458,7 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
{
+ const struct viridian_vcpu *vv = v->arch.hvm.viridian;
const struct domain *d = v->domain;
struct viridian_domain *vd = d->arch.hvm.viridian;
@@ -201,6 +500,38 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
break;
}
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ {
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_CONFIG) / 2;
+ const struct viridian_stimer *vs =
+ &array_access_nospec(vv->stimer, stimerx);
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = vs->config.raw;
+ break;
+ }
+
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ {
+ unsigned int stimerx = (idx - HV_X64_MSR_STIMER0_CONFIG) / 2;
+ const struct viridian_stimer *vs =
+ &array_access_nospec(vv->stimer, stimerx);
+
+ if ( !(viridian_feature_mask(d) & HVMPV_stimer) )
+ return X86EMUL_EXCEPTION;
+
+ *val = vs->count;
+ break;
+ }
+
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x\n", __func__, idx);
return X86EMUL_EXCEPTION;
@@ -209,8 +540,19 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
return X86EMUL_OKAY;
}
-int viridian_time_vcpu_init(const struct vcpu *v)
+int viridian_time_vcpu_init(struct vcpu *v)
{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &vv->stimer[i];
+
+ vs->v = v;
+ init_timer(&vs->timer, stimer_expire, vs, v->processor);
+ }
+
return 0;
}
@@ -221,6 +563,16 @@ int viridian_time_domain_init(const struct domain *d)
void viridian_time_vcpu_deinit(const struct vcpu *v)
{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &vv->stimer[i];
+
+ kill_timer(&vs->timer);
+ vs->v = NULL;
+ }
}
void viridian_time_domain_deinit(const struct domain *d)
@@ -231,11 +583,36 @@ void viridian_time_domain_deinit(const struct domain *d)
void viridian_time_save_vcpu_ctxt(
const struct vcpu *v, struct hvm_viridian_vcpu_context *ctxt)
{
+ const struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(vv->stimer) !=
+ ARRAY_SIZE(ctxt->stimer_config_msr));
+ BUILD_BUG_ON(ARRAY_SIZE(vv->stimer) !=
+ ARRAY_SIZE(ctxt->stimer_count_msr));
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ const struct viridian_stimer *vs = &vv->stimer[i];
+
+ ctxt->stimer_config_msr[i] = vs->config.raw;
+ ctxt->stimer_count_msr[i] = vs->count;
+ }
}
void viridian_time_load_vcpu_ctxt(
struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ for ( i = 0; i < ARRAY_SIZE(vv->stimer); i++ )
+ {
+ struct viridian_stimer *vs = &vv->stimer[i];
+
+ vs->config.raw = ctxt->stimer_config_msr[i];
+ vs->count = ctxt->stimer_count_msr[i];
+ }
}
void viridian_time_save_domain_ctxt(
@@ -181,6 +181,8 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
mask.AccessPartitionReferenceTsc = 1;
if ( viridian_feature_mask(d) & HVMPV_synic )
mask.AccessSynicRegs = 1;
+ if ( viridian_feature_mask(d) & HVMPV_stimer )
+ mask.AccessSyntheticTimerRegs = 1;
u.mask = mask;
@@ -322,6 +324,8 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
case HV_X64_MSR_TSC_FREQUENCY:
case HV_X64_MSR_APIC_FREQUENCY:
case HV_X64_MSR_REFERENCE_TSC:
+ case HV_X64_MSR_TIME_REF_COUNT:
+ case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
return viridian_time_wrmsr(v, idx, val);
case HV_X64_MSR_CRASH_P0:
@@ -403,6 +407,7 @@ int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
case HV_X64_MSR_APIC_FREQUENCY:
case HV_X64_MSR_REFERENCE_TSC:
case HV_X64_MSR_TIME_REF_COUNT:
+ case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
return viridian_time_rdmsr(v, idx, val);
case HV_X64_MSR_CRASH_P0:
@@ -40,6 +40,32 @@ union viridian_sint_msr
};
};
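+/*
+ * The bit layout below follows the synthetic timer configuration
+ * register described in the Hyper-V TLFS.
+ */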
+union viridian_stimer_config_msr
+{
+ uint64_t raw;
+ struct
+ {
+ uint64_t enabled:1;
+ uint64_t periodic:1;
+ uint64_t lazy:1;
+ uint64_t auto_enable:1;
+ uint64_t vector:8;
+ uint64_t direct_mode:1;
+ uint64_t reserved_zero1:3;
+ uint64_t sintx:4;
+ uint64_t reserved_zero2:44;
+ };
+};
+
+struct viridian_stimer {
+ struct vcpu *v;
+ struct timer timer;
+ union viridian_stimer_config_msr config;
+ uint64_t count;
+ uint64_t expiration;
+ bool started;
+};
+
struct viridian_vcpu
{
struct viridian_page vp_assist;
@@ -51,6 +77,9 @@ struct viridian_vcpu
struct viridian_page simp;
union viridian_sint_msr sint[16];
uint8_t vector_to_sintx[256];
+ struct viridian_stimer stimer[4];
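+    /* Per-timer bitmaps, bit n corresponding to stimer[n] */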
+ unsigned int stimer_enabled;
+ unsigned int stimer_pending;
uint64_t crash_param[5];
};
@@ -87,6 +116,7 @@ struct viridian_domain
union viridian_page_msr hypercall_gpa;
struct viridian_time_ref_count time_ref_count;
struct viridian_page reference_tsc;
+ bool reference_tsc_valid;
};
void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
@@ -111,7 +141,7 @@ void viridian_apic_assist_set(const struct vcpu *v);
bool viridian_apic_assist_completed(const struct vcpu *v);
void viridian_apic_assist_clear(const struct vcpu *v);
-void viridian_synic_poll(const struct vcpu *v);
+void viridian_synic_poll(struct vcpu *v);
bool viridian_synic_is_auto_eoi_sint(const struct vcpu *v,
unsigned int vector);
void viridian_synic_ack_sint(const struct vcpu *v, unsigned int vector);
@@ -604,6 +604,8 @@ struct hvm_viridian_vcpu_context {
uint8_t _pad[7];
uint64_t simp_msr;
uint64_t sint_msr[16];
+ uint64_t stimer_config_msr[4];
+ uint64_t stimer_count_msr[4];
};
DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context);
@@ -150,6 +150,10 @@
#define _HVMPV_synic 7
#define HVMPV_synic (1 << _HVMPV_synic)
+/* Enable STIMER MSRs */
+#define _HVMPV_stimer 8
+#define HVMPV_stimer (1 << _HVMPV_stimer)
+
#define HVMPV_feature_mask \
(HVMPV_base_freq | \
HVMPV_no_freq | \
@@ -158,7 +162,8 @@
HVMPV_hcall_remote_tlb_flush | \
HVMPV_apic_assist | \
HVMPV_crash_ctl | \
- HVMPV_synic)
+ HVMPV_synic | \
+ HVMPV_stimer)
#endif