@@ -1419,6 +1419,28 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
return ret;
}
+enum es_result savic_register_gpa(u64 gpa)
+{
+	struct ghcb_state state;
+	struct es_em_ctxt ctxt;
+	enum es_result res;
+	struct ghcb *ghcb;
+
+	guard(irqsave)();
+
+	ghcb = __sev_get_ghcb(&state);
+	vc_ghcb_invalidate(ghcb);
+
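+	/*
+	 * RAX carries the APIC ID of the vCPU whose backing page is
+	 * being registered; SVM_VMGEXIT_SAVIC_SELF_GPA denotes the vCPU
+	 * issuing this call. RBX carries the GPA of the backing page.
+	 */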
+	ghcb_set_rax(ghcb, SVM_VMGEXIT_SAVIC_SELF_GPA);
+	ghcb_set_rbx(ghcb, gpa);
+	res = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_SAVIC,
+				  SVM_VMGEXIT_SAVIC_REGISTER_GPA, 0);
+
+	__sev_put_ghcb(&state);
+
+	return res;
+}
+
static void snp_register_per_cpu_ghcb(void)
{
struct sev_es_runtime_data *data;
@@ -305,6 +305,7 @@ struct apic {
/* Probe, setup and smpboot functions */
int (*probe)(void);
+ void (*setup)(void);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
void (*init_apic_ldr)(void);
@@ -520,6 +520,7 @@ int snp_svsm_vtpm_send_command(u8 *buffer);
void __init snp_secure_tsc_prepare(void);
void __init snp_secure_tsc_init(void);
+enum es_result savic_register_gpa(u64 gpa);
static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
@@ -570,6 +571,7 @@ static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_
static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; }
static inline void __init snp_secure_tsc_prepare(void) { }
static inline void __init snp_secure_tsc_init(void) { }
+static inline enum es_result savic_register_gpa(u64 gpa) { return ES_UNSUPPORTED; }
#endif /* CONFIG_AMD_MEM_ENCRYPT */
@@ -117,6 +117,10 @@
#define SVM_VMGEXIT_AP_CREATE 1
#define SVM_VMGEXIT_AP_DESTROY 2
#define SVM_VMGEXIT_SNP_RUN_VMPL 0x80000018
+#define SVM_VMGEXIT_SAVIC 0x8000001a
+#define SVM_VMGEXIT_SAVIC_REGISTER_GPA 0
+#define SVM_VMGEXIT_SAVIC_UNREGISTER_GPA 1
+#define SVM_VMGEXIT_SAVIC_SELF_GPA ~0ULL
#define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe
#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code) \
@@ -1502,6 +1502,8 @@ static void setup_local_APIC(void)
return;
}
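+	/*
+	 * Give the APIC driver a chance to do implementation-specific
+	 * setup, such as registering the Secure AVIC backing page.
+	 */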
+	if (apic->setup)
+		apic->setup();
/*
* If this comes from kexec/kcrash the APIC might be enabled in
* SPIV. Soft disable it before doing further initialization.
@@ -8,17 +8,54 @@
*/
#include <linux/cc_platform.h>
+#include <linux/percpu-defs.h>
#include <asm/apic.h>
#include <asm/sev.h>
#include "local.h"
+/*
+ * APIC_EILVTn(3) is the last defined APIC register, so the register
+ * array needs to cover offsets up to (but not including) APIC_EILVTn(4).
+ * Shifting the byte offset right by 2 converts it into a count of
+ * 32-bit registers.
+ */
+#define NR_APIC_REGS (APIC_EILVTn(4) >> 2)
+
+struct apic_page {
+	union {
+		u32 regs[NR_APIC_REGS];
+		u8 bytes[PAGE_SIZE];
+	};
+} __aligned(PAGE_SIZE);
+
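+/*
+ * Per-CPU APIC backing pages. Secure AVIC requires a page-aligned,
+ * page-sized area per vCPU in which the guest's APIC register state
+ * is maintained.
+ */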
+static struct apic_page __percpu *apic_page __ro_after_init;
+
static int savic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
return x2apic_enabled() && cc_platform_has(CC_ATTR_SNP_SECURE_AVIC);
}
+static void savic_setup(void)
+{
+	void *backing_page;
+	enum es_result res;
+	unsigned long gpa;
+
+	backing_page = this_cpu_ptr(apic_page);
+	gpa = __pa(backing_page);
+
+	/*
+	 * The NPT entry for a vCPU's APIC backing page must always be
+	 * present while the vCPU is running in order for Secure AVIC to
+	 * function. If the entry is not present, VMRUN fails with a
+	 * VMEXIT_BUSY error code and the vCPU cannot be resumed. Notify
+	 * the hypervisor of the GPA of this vCPU's APIC backing page via
+	 * savic_register_gpa(), so that the hypervisor can ensure the
+	 * page is mapped in the NPT before executing VMRUN.
+	 */
+	res = savic_register_gpa(gpa);
+	if (res != ES_OK)
+		snp_abort();
+}
+
static int savic_probe(void)
{
if (!cc_platform_has(CC_ATTR_SNP_SECURE_AVIC))
@@ -30,6 +67,10 @@ static int savic_probe(void)
/* unreachable */
}
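+	/*
+	 * Secure AVIC cannot operate without the backing pages, so treat
+	 * allocation failure as fatal.
+	 */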
+	apic_page = alloc_percpu(struct apic_page);
+	if (!apic_page)
+		snp_abort();
+
return 1;
}
@@ -38,6 +79,7 @@ static struct apic apic_x2apic_savic __ro_after_init = {
.name = "secure avic x2apic",
.probe = savic_probe,
.acpi_madt_oem_check = savic_acpi_madt_oem_check,
+ .setup = savic_setup,
.dest_mode_logical = false,