@@ -71,7 +71,7 @@ struct ghcb {
u8 shared_buffer[GHCB_SHARED_BUF_SIZE];
u8 reserved_0xff0[10];
- u16 protocol_version; /* negotiated SEV-ES/GHCB protocol version */
+ u16 version; /* version of the GHCB data structure */
u32 ghcb_usage;
} __packed;
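As an aside, the GHCB specification pins these two fields to the very end of the 4 KiB GHCB page, so the rename changes the name only, not the layout. A hypothetical compile-time check of that layout (not part of this patch; the offsets assume the save area plus its padding fills the first 0x800 bytes and GHCB_SHARED_BUF_SIZE is 2032, as in the full structure definition):

#include <stddef.h>	/* offsetof */

_Static_assert(offsetof(struct ghcb, version) == 0xffa, "version at 0xffa");
_Static_assert(offsetof(struct ghcb, ghcb_usage) == 0xffc, "ghcb_usage at 0xffc");
_Static_assert(sizeof(struct ghcb) == 4096, "GHCB is exactly one page");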
@@ -79,6 +79,9 @@ struct ghcb {
#define GHCB_PROTOCOL_MAX 1ULL
#define GHCB_DEFAULT_USAGE 0ULL
+/* Version of the GHCB data structure */
+#define GHCB_VERSION 1
+
#define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); }
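GHCB_PROTOCOL_MAX is the highest GHCB protocol version this guest speaks; before relying on the full GHCB, a guest is expected to negotiate a common version with the hypervisor over the GHCB MSR protocol. A minimal sketch of that handshake, assuming the GHCB_MSR_* encodings from the GHCB specification and a hypothetical GHCB_PROTOCOL_MIN of 1 (none of these are defined by this patch):

#define GHCB_PROTOCOL_MIN	1ULL
#define GHCB_MSR_SEV_INFO_RESP	0x001ULL	/* response code in bits 11:0 */
#define GHCB_MSR_SEV_INFO_REQ	0x002ULL	/* request code in bits 11:0 */
#define GHCB_MSR_INFO(v)	((v) & 0xfffULL)
#define GHCB_MSR_PROTO_MAX(v)	(((v) >> 48) & 0xffffULL)
#define GHCB_MSR_PROTO_MIN(v)	(((v) >> 32) & 0xffffULL)

static bool sev_es_negotiate_protocol(void)
{
	u64 val;

	/* Ask the hypervisor which protocol versions it supports */
	wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = rdmsr(MSR_AMD64_SEV_ES_GHCB);

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	/* The two version ranges must overlap */
	return GHCB_MSR_PROTO_MIN(val) <= GHCB_PROTOCOL_MAX &&
	       GHCB_MSR_PROTO_MAX(val) >= GHCB_PROTOCOL_MIN;
}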
enum es_result {
@@ -58,6 +58,88 @@ static void vc_finish_insn(struct es_em_ctxt *ctxt)
ctxt->regs->rip += ctxt->insn.length;
}
+static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt,
+ u64 exit_code, u64 exit_info_1,
+ u64 exit_info_2)
+{
+ enum es_result ret;
+
+ /* Fill in protocol and format specifiers */
+ ghcb->version = GHCB_VERSION;
+ ghcb->ghcb_usage = GHCB_DEFAULT_USAGE;
+
+ ghcb_set_sw_exit_code(ghcb, exit_code);
+ ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
+ ghcb_set_sw_exit_info_2(ghcb, exit_info_2);
+
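+ /*
+  * Hand the request to the hypervisor: publish the GHCB's physical
+  * address in the GHCB MSR and exit with VMGEXIT. The hypervisor
+  * writes its response into the GHCB before resuming the guest.
+  */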
+ wrmsr(MSR_AMD64_SEV_ES_GHCB, __pa(ghcb));
+ VMGEXIT();
+
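+ /*
+  * Per the GHCB spec, a value of 1 in the low 32 bits of
+  * sw_exit_info_1 means the hypervisor asks the guest to raise an
+  * exception; sw_exit_info_2 then describes the event in SVM
+  * EVENTINJ format, with the error code in the upper 32 bits.
+  */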
+ if ((ghcb->save.sw_exit_info_1 & 0xffffffff) == 1) {
+ u64 info = ghcb->save.sw_exit_info_2;
+ unsigned long v;
+
+ v = info & SVM_EVTINJ_VEC_MASK;
+
+ /* Check if exception information from hypervisor is sane. */
+ if ((info & SVM_EVTINJ_VALID) &&
+ ((v == GP_VECTOR) || (v == UD_VECTOR)) &&
+ ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) {
+ ctxt->fi.vector = v;
+ if (info & SVM_EVTINJ_VALID_ERR)
+ ctxt->fi.error_code = info >> 32;
+ ret = ES_EXCEPTION;
+ } else {
+ ret = ES_VMM_ERROR;
+ }
+ } else if (ghcb->save.sw_exit_info_1 & 0xffffffff) {
+ ret = ES_VMM_ERROR;
+ } else {
+ ret = ES_OK;
+ }
+
+ return ret;
+}
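This helper is deliberately generic: vc_handle_cpuid() below is its first caller, and other intercepted instructions follow the same fill-registers/call/copy-back pattern. As a hypothetical illustration only (not part of this patch, and assuming the instruction decoder exposes the raw opcode bytes and that ghcb_set_rdx() and SVM_EXIT_MSR exist, as they do in Linux's sev-es code), an MSR handler could look like:

static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct ex_regs *regs = ctxt->regs;
	/* Per the GHCB spec, exit_info_1 is 1 for WRMSR, 0 for RDMSR */
	u64 exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
	enum es_result ret;

	ghcb_set_rcx(ghcb, regs->rcx);
	if (exit_info_1) {
		ghcb_set_rax(ghcb, regs->rax);
		ghcb_set_rdx(ghcb, regs->rdx);
	}

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
	if (ret != ES_OK)
		return ret;

	if (!exit_info_1) {
		if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
			return ES_VMM_ERROR;
		regs->rax = ghcb->save.rax;
		regs->rdx = ghcb->save.rdx;
	}

	return ES_OK;
}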
+
+static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct ex_regs *regs = ctxt->regs;
+ u32 cr4 = read_cr4();
+ enum es_result ret;
+
+ ghcb_set_rax(ghcb, regs->rax);
+ ghcb_set_rcx(ghcb, regs->rcx);
+
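+ /*
+  * The hypervisor needs the guest's XCR0 to emulate CPUID leaf 0xD
+  * (extended state enumeration).
+  */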
+ if (cr4 & X86_CR4_OSXSAVE) {
+ /* Safe to read xcr0 */
+ u64 xcr0;
+
+ xgetbv_safe(XCR_XFEATURE_ENABLED_MASK, &xcr0);
+ ghcb_set_xcr0(ghcb, xcr0);
+ } else {
+ /* xgetbv will cause #GP - use reset value for xcr0 */
+ ghcb_set_xcr0(ghcb, 1);
+ }
+
+ ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
+ if (ret != ES_OK)
+ return ret;
+
+ if (!(ghcb_rax_is_valid(ghcb) &&
+ ghcb_rbx_is_valid(ghcb) &&
+ ghcb_rcx_is_valid(ghcb) &&
+ ghcb_rdx_is_valid(ghcb)))
+ return ES_VMM_ERROR;
+
+ regs->rax = ghcb->save.rax;
+ regs->rbx = ghcb->save.rbx;
+ regs->rcx = ghcb->save.rcx;
+ regs->rdx = ghcb->save.rdx;
+
+ return ES_OK;
+}
+
static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
struct ghcb *ghcb,
unsigned long exit_code)
@@ -65,6 +147,9 @@ static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
enum es_result result;
switch (exit_code) {
+ case SVM_EXIT_CPUID:
+ result = vc_handle_cpuid(ghcb, ctxt);
+ break;
default:
/*
* Unexpected #VC exception