@@ -318,6 +318,12 @@
*/
#define LIBXL_HAVE_VIRIDIAN_CRASH_CTL 1
+/*
+ * LIBXL_HAVE_VIRIDIAN_SYNIC indicates that the 'synic' value
+ * is present in the viridian enlightenment enumeration.
+ */
+#define LIBXL_HAVE_VIRIDIAN_SYNIC 1
+
/*
* LIBXL_HAVE_BUILDINFO_HVM_ACPI_LAPTOP_SLATE indicates that
* libxl_domain_build_info has the u.hvm.acpi_laptop_slate field.
@@ -317,6 +317,9 @@ static int hvm_set_viridian_features(libxl__gc *gc, uint32_t domid,
if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_CRASH_CTL))
mask |= HVMPV_crash_ctl;
+ if (libxl_bitmap_test(&enlightenments, LIBXL_VIRIDIAN_ENLIGHTENMENT_SYNIC))
+ mask |= HVMPV_synic;
+
if (mask != 0 &&
xc_hvm_param_set(CTX->xch,
domid,
@@ -235,6 +235,7 @@ libxl_viridian_enlightenment = Enumeration("viridian_enlightenment", [
(4, "hcall_remote_tlb_flush"),
(5, "apic_assist"),
(6, "crash_ctl"),
+ (7, "synic"),
])
libxl_hdtype = Enumeration("hdtype", [
@@ -13,6 +13,7 @@
#include <asm/apic.h>
#include <asm/hvm/support.h>
+#include <asm/hvm/vlapic.h>
#include "private.h"
@@ -28,6 +29,37 @@ typedef union _HV_VP_ASSIST_PAGE
uint8_t ReservedZBytePadding[PAGE_SIZE];
} HV_VP_ASSIST_PAGE;
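+/*
+ * Message type, flag and header definitions below follow those given
+ * in the Hyper-V Top Level Functional Specification (TLFS).
+ */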
+typedef enum HV_MESSAGE_TYPE {
+ HvMessageTypeNone,
+ HvMessageTimerExpired = 0x80000010,
+} HV_MESSAGE_TYPE;
+
+typedef struct HV_MESSAGE_FLAGS {
+ uint8_t MessagePending:1;
+ uint8_t Reserved:7;
+} HV_MESSAGE_FLAGS;
+
+typedef struct HV_MESSAGE_HEADER {
+ HV_MESSAGE_TYPE MessageType;
+ uint16_t Reserved1;
+ HV_MESSAGE_FLAGS MessageFlags;
+ uint8_t PayloadSize;
+ uint64_t Reserved2;
+} HV_MESSAGE_HEADER;
+
+#define HV_MESSAGE_SIZE 256
+#define HV_MESSAGE_MAX_PAYLOAD_QWORD_COUNT 30
+
+typedef struct HV_MESSAGE {
+ HV_MESSAGE_HEADER Header;
+ uint64_t Payload[HV_MESSAGE_MAX_PAYLOAD_QWORD_COUNT];
+} HV_MESSAGE;
+
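+/* Verify at build time that HV_MESSAGE matches its specified size */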
+static void __init __maybe_unused build_assertions(void)
+{
+ BUILD_BUG_ON(sizeof(HV_MESSAGE) != HV_MESSAGE_SIZE);
+}
+
void viridian_apic_assist_set(const struct vcpu *v)
{
struct viridian_vcpu *vv = v->arch.hvm.viridian;
@@ -83,6 +115,8 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
struct viridian_vcpu *vv = v->arch.hvm.viridian;
struct domain *d = v->domain;
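+    /*
+     * State of a remote vCPU is only safe to modify while it is
+     * guaranteed not to be running (e.g. during context load).
+     */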
+ ASSERT(v == current || !v->is_running);
+
switch ( idx )
{
case HV_X64_MSR_EOI:
@@ -107,6 +141,76 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
viridian_map_guest_page(d, &vv->vp_assist);
break;
+ case HV_X64_MSR_SCONTROL:
+ if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+ return X86EMUL_EXCEPTION;
+
+ vv->scontrol = val;
+ break;
+
+ case HV_X64_MSR_SVERSION:
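+        /* The version MSR is read-only */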
+ return X86EMUL_EXCEPTION;
+
+ case HV_X64_MSR_SIEFP:
+ if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+ return X86EMUL_EXCEPTION;
+
+ vv->siefp = val;
+ break;
+
+ case HV_X64_MSR_SIMP:
+ if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+ return X86EMUL_EXCEPTION;
+
+ viridian_unmap_guest_page(&vv->simp);
+ vv->simp.msr.raw = val;
+ viridian_dump_guest_page(v, "SIMP", &vv->simp);
+ if ( vv->simp.msr.enabled )
+ viridian_map_guest_page(d, &vv->simp);
+ break;
+
+ case HV_X64_MSR_EOM:
+ if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+ return X86EMUL_EXCEPTION;
+
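+        /* A write of any value dismisses all pending-message state */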
+ vv->msg_pending = 0;
+ break;
+
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+ {
+ unsigned int sintx = idx - HV_X64_MSR_SINT0;
+ union viridian_sint_msr new, *vs =
+ &array_access_nospec(vv->sint, sintx);
+ uint8_t vector;
+
+ if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+ return X86EMUL_EXCEPTION;
+
+ /* Vectors must be in the range 0x10-0xff inclusive */
+ new.raw = val;
+ if ( new.vector < 0x10 )
+ return X86EMUL_EXCEPTION;
+
+ /*
+ * Invalidate any previous mapping by setting an out-of-range
+ * index before setting the new mapping.
+ */
+ vector = vs->vector;
+ vv->vector_to_sintx[vector] = ARRAY_SIZE(vv->sint);
+
+ vector = new.vector;
+ vv->vector_to_sintx[vector] = sintx;
+
+ printk(XENLOG_G_INFO "%pv: VIRIDIAN SINT%u: vector: %x\n", v, sintx,
+ vector);
+
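+        /* Moving a SINTx into polling mode dismisses any pending message */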
+ if ( new.polling )
+ __clear_bit(sintx, &vv->msg_pending);
+
+ *vs = new;
+ break;
+ }
+
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x (%016"PRIx64")\n",
__func__, idx, val);
@@ -118,6 +222,9 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
{
+ const struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ const struct domain *d = v->domain;
+
switch ( idx )
{
case HV_X64_MSR_EOI:
@@ -131,14 +238,70 @@ int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
*val = ((uint64_t)icr2 << 32) | icr;
break;
}
+
case HV_X64_MSR_TPR:
*val = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI);
break;
case HV_X64_MSR_VP_ASSIST_PAGE:
- *val = v->arch.hvm.viridian->vp_assist.msr.raw;
+ *val = vv->vp_assist.msr.raw;
+ break;
+
+ case HV_X64_MSR_SCONTROL:
+ if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+ return X86EMUL_EXCEPTION;
+
+ *val = vv->scontrol;
+ break;
+
+ case HV_X64_MSR_SVERSION:
+ if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+ return X86EMUL_EXCEPTION;
+
+ /*
+ * The specification says that the version number is 0x00000001
+ * and should be in the lower 32-bits of the MSR, while the
+ * upper 32-bits are reserved... but it doesn't say what they
+ * should be set to. Assume everything but the bottom bit
+ * should be zero.
+ */
+ *val = 1ul;
+ break;
+
+ case HV_X64_MSR_SIEFP:
+ if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+ return X86EMUL_EXCEPTION;
+
+ *val = vv->siefp;
+ break;
+
+ case HV_X64_MSR_SIMP:
+ if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+ return X86EMUL_EXCEPTION;
+
+ *val = vv->simp.msr.raw;
break;
+ case HV_X64_MSR_EOM:
+ if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+ return X86EMUL_EXCEPTION;
+
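+        /* Reads of EOM always yield zero */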
+ *val = 0;
+ break;
+
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+ {
+ unsigned int sintx = idx - HV_X64_MSR_SINT0;
+ const union viridian_sint_msr *vs =
+ &array_access_nospec(vv->sint, sintx);
+
+ if ( !(viridian_feature_mask(d) & HVMPV_synic) )
+ return X86EMUL_EXCEPTION;
+
+ *val = vs->raw;
+ break;
+ }
+
default:
gdprintk(XENLOG_INFO, "%s: unimplemented MSR %#x\n", __func__, idx);
return X86EMUL_EXCEPTION;
@@ -149,6 +312,20 @@ int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
int viridian_synic_vcpu_init(const struct vcpu *v)
{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+    /*
+     * The specification says that all synthetic interrupts must be
+     * initially masked.
+     */
+ for ( i = 0; i < ARRAY_SIZE(vv->sint); i++ )
+ vv->sint[i].mask = 1;
+
+ /* Initialize the mapping array with invalid values */
+ for ( i = 0; i < ARRAY_SIZE(vv->vector_to_sintx); i++ )
+ vv->vector_to_sintx[i] = ARRAY_SIZE(vv->sint);
+
return 0;
}
@@ -159,17 +336,59 @@ int viridian_synic_domain_init(const struct domain *d)
void viridian_synic_vcpu_deinit(const struct vcpu *v)
{
- viridian_unmap_guest_page(&v->arch.hvm.viridian->vp_assist);
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+
+ viridian_unmap_guest_page(&vv->vp_assist);
+ viridian_unmap_guest_page(&vv->simp);
}
void viridian_synic_domain_deinit(const struct domain *d)
{
}
+void viridian_synic_poll(const struct vcpu *v)
+{
+ /* There are currently no message sources */
+}
+
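+/*
+ * Returns true iff the specified vector is currently mapped to a SINTx
+ * with its auto-EOI flag set.
+ */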
+bool viridian_synic_is_auto_eoi_sint(const struct vcpu *v,
+ unsigned int vector)
+{
+ const struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int sintx = vv->vector_to_sintx[vector];
+ const union viridian_sint_msr *vs =
+ &array_access_nospec(vv->sint, sintx);
+
+ if ( sintx >= ARRAY_SIZE(vv->sint) )
+ return false;
+
+ return vs->auto_eoi;
+}
+
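+/* Dismiss pending-message state for the SINTx mapped to the vector */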
+void viridian_synic_ack_sint(const struct vcpu *v, unsigned int vector)
+{
+ struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int sintx = vv->vector_to_sintx[vector];
+
+ ASSERT(v == current);
+
+ if ( sintx < ARRAY_SIZE(vv->sint) )
+ __clear_bit(array_index_nospec(sintx, ARRAY_SIZE(vv->sint)),
+ &vv->msg_pending);
+}
+
void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
struct hvm_viridian_vcpu_context *ctxt)
{
const struct viridian_vcpu *vv = v->arch.hvm.viridian;
+ unsigned int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(vv->sint) != ARRAY_SIZE(ctxt->sint_msr));
+
+ for ( i = 0; i < ARRAY_SIZE(vv->sint); i++ )
+ ctxt->sint_msr[i] = vv->sint[i].raw;
+
+ ctxt->simp_msr = vv->simp.msr.raw;
ctxt->apic_assist_pending = vv->apic_assist_pending;
ctxt->vp_assist_msr = vv->vp_assist.msr.raw;
@@ -180,12 +399,30 @@ void viridian_synic_load_vcpu_ctxt(
{
struct viridian_vcpu *vv = v->arch.hvm.viridian;
struct domain *d = v->domain;
+ unsigned int i;
vv->vp_assist.msr.raw = ctxt->vp_assist_msr;
if ( vv->vp_assist.msr.enabled )
viridian_map_guest_page(d, &vv->vp_assist);
vv->apic_assist_pending = ctxt->apic_assist_pending;
+
+ vv->simp.msr.raw = ctxt->simp_msr;
+ if ( vv->simp.msr.enabled )
+ viridian_map_guest_page(d, &vv->simp);
+
+ for ( i = 0; i < ARRAY_SIZE(vv->sint); i++ )
+ {
+ uint8_t vector;
+
+ vv->sint[i].raw = ctxt->sint_msr[i];
+
+ vector = vv->sint[i].vector;
+ if ( vector < 0x10 )
+ continue;
+
+ vv->vector_to_sintx[vector] = i;
+ }
}
void viridian_synic_save_domain_ctxt(
@@ -89,6 +89,7 @@ typedef union _HV_CRASH_CTL_REG_CONTENTS
/* Viridian CPUID leaf 3, Hypervisor Feature Indication */
#define CPUID3D_CRASH_MSRS (1 << 10)
+#define CPUID3D_SINT_POLLING (1 << 17)
/* Viridian CPUID leaf 4: Implementation Recommendations. */
#define CPUID4A_HCALL_REMOTE_TLB_FLUSH (1 << 2)
@@ -178,6 +179,8 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
mask.AccessPartitionReferenceCounter = 1;
if ( viridian_feature_mask(d) & HVMPV_reference_tsc )
mask.AccessPartitionReferenceTsc = 1;
+ if ( viridian_feature_mask(d) & HVMPV_synic )
+ mask.AccessSynicRegs = 1;
u.mask = mask;
@@ -186,6 +189,8 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
if ( viridian_feature_mask(d) & HVMPV_crash_ctl )
res->d = CPUID3D_CRASH_MSRS;
+ if ( viridian_feature_mask(d) & HVMPV_synic )
+ res->d |= CPUID3D_SINT_POLLING;
break;
}
@@ -306,8 +311,16 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
case HV_X64_MSR_ICR:
case HV_X64_MSR_TPR:
case HV_X64_MSR_VP_ASSIST_PAGE:
+ case HV_X64_MSR_SCONTROL:
+ case HV_X64_MSR_SVERSION:
+ case HV_X64_MSR_SIEFP:
+ case HV_X64_MSR_SIMP:
+ case HV_X64_MSR_EOM:
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
return viridian_synic_wrmsr(v, idx, val);
+ case HV_X64_MSR_TSC_FREQUENCY:
+ case HV_X64_MSR_APIC_FREQUENCY:
case HV_X64_MSR_REFERENCE_TSC:
return viridian_time_wrmsr(v, idx, val);
@@ -378,6 +391,12 @@ int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
case HV_X64_MSR_ICR:
case HV_X64_MSR_TPR:
case HV_X64_MSR_VP_ASSIST_PAGE:
+ case HV_X64_MSR_SCONTROL:
+ case HV_X64_MSR_SVERSION:
+ case HV_X64_MSR_SIEFP:
+ case HV_X64_MSR_SIMP:
+ case HV_X64_MSR_EOM:
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
return viridian_synic_rdmsr(v, idx, val);
case HV_X64_MSR_TSC_FREQUENCY:
@@ -461,10 +461,15 @@ void vlapic_EOI_set(struct vlapic *vlapic)
void vlapic_handle_EOI(struct vlapic *vlapic, u8 vector)
{
- struct domain *d = vlapic_domain(vlapic);
+ struct vcpu *v = vlapic_vcpu(vlapic);
+ struct domain *d = v->domain;
+
+ /* All synic SINTx vectors are edge triggered */
if ( vlapic_test_vector(vector, &vlapic->regs->data[APIC_TMR]) )
vioapic_update_EOI(d, vector);
+ else if ( has_viridian_synic(d) )
+ viridian_synic_ack_sint(v, vector);
hvm_dpci_msi_eoi(d, vector);
}
@@ -1301,6 +1306,13 @@ int vlapic_has_pending_irq(struct vcpu *v)
if ( !vlapic_enabled(vlapic) )
return -1;
+ /*
+ * Poll the viridian message queues before checking the IRR since
+ * a synthetic interrupt may be asserted during the poll.
+ */
+ if ( has_viridian_synic(v->domain) )
+ viridian_synic_poll(v);
+
irr = vlapic_find_highest_irr(vlapic);
if ( irr == -1 )
return -1;
@@ -1360,8 +1372,12 @@ int vlapic_ack_pending_irq(struct vcpu *v, int vector, bool_t force_ack)
}
done:
- vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
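+    /*
+     * An auto-EOI SINT vector never receives an EOI from the guest, so
+     * it must not be latched in the ISR, where it would permanently
+     * block vectors of equal or lower priority.
+     */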
+ if ( !has_viridian_synic(v->domain) ||
+ !viridian_synic_is_auto_eoi_sint(v, vector) )
+ vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
+
vlapic_clear_irr(vector, vlapic);
+
return 1;
}
@@ -472,6 +472,9 @@ static inline bool hvm_get_guest_bndcfgs(struct vcpu *v, u64 *val)
#define has_viridian_apic_assist(d) \
(is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))
+#define has_viridian_synic(d) \
+ (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_synic))
+
static inline void hvm_inject_exception(
unsigned int vector, unsigned int type,
unsigned int insn_len, int error_code)
@@ -26,10 +26,31 @@ struct viridian_page
void *ptr;
};
+union viridian_sint_msr
+{
+ uint64_t raw;
+ struct
+ {
+ uint64_t vector:8;
+ uint64_t reserved_preserved1:8;
+ uint64_t mask:1;
+ uint64_t auto_eoi:1;
+ uint64_t polling:1;
+ uint64_t reserved_preserved2:45;
+ };
+};
+
struct viridian_vcpu
{
struct viridian_page vp_assist;
bool apic_assist_pending;
+ bool polled;
+ unsigned int msg_pending;
+ uint64_t scontrol;
+ uint64_t siefp;
+ struct viridian_page simp;
+ union viridian_sint_msr sint[16];
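+    /* Reverse mapping; a value of ARRAY_SIZE(sint) means 'unmapped' */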
+ uint8_t vector_to_sintx[256];
uint64_t crash_param[5];
};
@@ -90,6 +111,11 @@ void viridian_apic_assist_set(const struct vcpu *v);
bool viridian_apic_assist_completed(const struct vcpu *v);
void viridian_apic_assist_clear(const struct vcpu *v);
+void viridian_synic_poll(const struct vcpu *v);
+bool viridian_synic_is_auto_eoi_sint(const struct vcpu *v,
+ unsigned int vector);
+void viridian_synic_ack_sint(const struct vcpu *v, unsigned int vector);
+
#endif /* __ASM_X86_HVM_VIRIDIAN_H__ */
/*
@@ -602,6 +602,8 @@ struct hvm_viridian_vcpu_context {
uint64_t vp_assist_msr;
uint8_t apic_assist_pending;
uint8_t _pad[7];
+ uint64_t simp_msr;
+ uint64_t sint_msr[16];
};
DECLARE_HVM_SAVE_TYPE(VIRIDIAN_VCPU, 17, struct hvm_viridian_vcpu_context);
@@ -146,6 +146,10 @@
#define _HVMPV_crash_ctl 6
#define HVMPV_crash_ctl (1 << _HVMPV_crash_ctl)
+/* Enable SYNIC MSRs */
+#define _HVMPV_synic 7
+#define HVMPV_synic (1 << _HVMPV_synic)
+
#define HVMPV_feature_mask \
(HVMPV_base_freq | \
HVMPV_no_freq | \
@@ -153,7 +157,8 @@
HVMPV_reference_tsc | \
HVMPV_hcall_remote_tlb_flush | \
HVMPV_apic_assist | \
- HVMPV_crash_ctl)
+ HVMPV_crash_ctl | \
+ HVMPV_synic)
#endif