@@ -1382,14 +1382,10 @@ static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool wri
return ES_OK;
}
-static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+static enum es_result __vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write)
{
struct pt_regs *regs = ctxt->regs;
enum es_result ret;
- bool write;
-
- /* Is it a WRMSR? */
- write = ctxt->insn.opcode.bytes[1] == 0x30;
switch (regs->cx) {
case MSR_SVSM_CAA:
@@ -1419,6 +1415,39 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
return ret;
}
+static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+{
+ return __vc_handle_msr(ghcb, ctxt, ctxt->insn.opcode.bytes[1] == 0x30);
+}
+
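+/*
+ * Write an x2APIC register via the GHCB protocol, for registers whose
+ * writes have to be handed to the hypervisor (e.g. ICR writes done in
+ * savic_icr_write()).
+ */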
+void savic_ghcb_msr_write(u32 reg, u64 value)
+{
+ u64 msr = APIC_BASE_MSR + (reg >> 4);
+ struct pt_regs regs = {
+ .cx = msr,
+ .ax = lower_32_bits(value),
+ .dx = upper_32_bits(value)
+ };
+ struct es_em_ctxt ctxt = { .regs = &regs };
+ struct ghcb_state state;
+ enum es_result res;
+ struct ghcb *ghcb;
+
+ guard(irqsave)();
+
+ ghcb = __sev_get_ghcb(&state);
+ vc_ghcb_invalidate(ghcb);
+
+ res = __vc_handle_msr(ghcb, &ctxt, true);
+ if (res != ES_OK) {
+ pr_err("Secure AVIC msr (0x%llx) write returned error (%d)\n", msr, res);
+ /* MSR writes should never fail. Any failure is a fatal error for the SNP guest. */
+ snp_abort();
+ }
+
+ __sev_put_ghcb(&state);
+}
+
enum es_result savic_register_gpa(u64 gpa)
{
struct ghcb_state state;
@@ -521,6 +521,7 @@ int snp_svsm_vtpm_send_command(u8 *buffer);
void __init snp_secure_tsc_prepare(void);
void __init snp_secure_tsc_init(void);
enum es_result savic_register_gpa(u64 gpa);
+void savic_ghcb_msr_write(u32 reg, u64 value);
static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
@@ -572,6 +573,7 @@ static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; }
static inline void __init snp_secure_tsc_prepare(void) { }
static inline void __init snp_secure_tsc_init(void) { }
static inline enum es_result savic_register_gpa(u64 gpa) { return ES_UNSUPPORTED; }
+static inline void savic_ghcb_msr_write(u32 reg, u64 value) { }
#endif /* CONFIG_AMD_MEM_ENCRYPT */
@@ -7,6 +7,7 @@
* Author: Neeraj Upadhyay <Neeraj.Upadhyay@amd.com>
*/
+#include <linux/cpumask.h>
#include <linux/cc_platform.h>
#include <linux/percpu-defs.h>
#include <linux/align.h>
@@ -136,6 +137,17 @@ static u32 savic_read(u32 reg)
#define SAVIC_NMI_REQ 0x278
+static inline void self_ipi_reg_write(unsigned int vector)
+{
+ /*
+ * Secure AVIC hardware accelerates guest's MSR write to SELF_IPI
+ * register. It updates the IRR in the APIC backing page, evaluates
+ * the new IRR for interrupt injection and continues with guest
+ * code execution.
+ */
+ native_apic_msr_write(APIC_SELF_IPI, vector);
+}
+
static void savic_write(u32 reg, u32 data)
{
switch (reg) {
@@ -144,7 +156,6 @@ static void savic_write(u32 reg, u32 data)
case APIC_LVT1:
case APIC_TMICT:
case APIC_TDCR:
- case APIC_SELF_IPI:
case APIC_TASKPRI:
case APIC_EOI:
case APIC_SPIV:
@@ -160,6 +171,9 @@ static void savic_write(u32 reg, u32 data)
case APIC_EILVTn(0) ... APIC_EILVTn(3):
set_reg(reg, data);
break;
+ case APIC_SELF_IPI:
+ self_ipi_reg_write(data);
+ break;
/* ALLOWED_IRR offsets are writable */
case SAVIC_ALLOWED_IRR ... SAVIC_ALLOWED_IRR + 0x70:
if (IS_ALIGNED(reg - SAVIC_ALLOWED_IRR, 16)) {
@@ -172,6 +186,116 @@ static void savic_write(u32 reg, u32 data)
}
}
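+/*
+ * Request an IPI by setting the vector in the destination CPU's APIC
+ * backing page IRR.
+ */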
+static void send_ipi_dest(unsigned int cpu, unsigned int vector)
+{
+ update_vector(cpu, APIC_IRR, vector, true);
+}
+
+static void send_ipi_allbut(unsigned int vector)
+{
+ unsigned int cpu, src_cpu;
+
+ guard(irqsave)();
+
+ src_cpu = raw_smp_processor_id();
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ if (cpu == src_cpu)
+ continue;
+ send_ipi_dest(cpu, vector);
+ }
+}
+
+static inline void self_ipi(unsigned int vector)
+{
+ /* Self IPIs use the hardware-accelerated SELF_IPI register write. */
+ self_ipi_reg_write(vector);
+}
+
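+/*
+ * Handle a guest ICR write: decode the destination shorthand, set the
+ * vector in the destination CPUs' APIC backing page IRRs and, for any
+ * destination other than self, forward the ICR value to the hypervisor
+ * through the GHCB.
+ */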
+static void savic_icr_write(u32 icr_low, u32 icr_high)
+{
+ unsigned int dsh, vector;
+ u64 icr_data;
+
+ dsh = icr_low & APIC_DEST_ALLBUT;
+ vector = icr_low & APIC_VECTOR_MASK;
+
+ switch (dsh) {
+ case APIC_DEST_SELF:
+ self_ipi(vector);
+ break;
+ case APIC_DEST_ALLINC:
+ self_ipi(vector);
+ fallthrough;
+ case APIC_DEST_ALLBUT:
+ send_ipi_allbut(vector);
+ break;
+ default:
+ send_ipi_dest(icr_high, vector);
+ break;
+ }
+
+ icr_data = ((u64)icr_high) << 32 | icr_low;
+ if (dsh != APIC_DEST_SELF)
+ savic_ghcb_msr_write(APIC_ICR, icr_data);
+}
+
+static void send_ipi(u32 dest, unsigned int vector, unsigned int dsh)
+{
+ unsigned int icr_low;
+
+ icr_low = __prepare_ICR(dsh, vector, APIC_DEST_PHYSICAL);
+ savic_icr_write(icr_low, dest);
+}
+
+static void savic_send_ipi(int cpu, int vector)
+{
+ u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
+
+ send_ipi(dest, vector, 0);
+}
+
+static void send_ipi_mask(const struct cpumask *mask, unsigned int vector, bool excl_self)
+{
+ unsigned int cpu, this_cpu;
+
+ guard(irqsave)();
+
+ this_cpu = raw_smp_processor_id();
+
+ for_each_cpu(cpu, mask) {
+ if (excl_self && cpu == this_cpu)
+ continue;
+ send_ipi(per_cpu(x86_cpu_to_apicid, cpu), vector, 0);
+ }
+}
+
+static void savic_send_ipi_mask(const struct cpumask *mask, int vector)
+{
+ send_ipi_mask(mask, vector, false);
+}
+
+static void savic_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
+{
+ send_ipi_mask(mask, vector, true);
+}
+
+static void savic_send_ipi_allbutself(int vector)
+{
+ send_ipi(0, vector, APIC_DEST_ALLBUT);
+}
+
+static void savic_send_ipi_all(int vector)
+{
+ send_ipi(0, vector, APIC_DEST_ALLINC);
+}
+
+static void savic_send_ipi_self(int vector)
+{
+ self_ipi_reg_write(vector);
+}
+
static void savic_update_vector(unsigned int cpu, unsigned int vector, bool set)
{
update_vector(cpu, SAVIC_ALLOWED_IRR, vector, set);
@@ -251,13 +375,20 @@ static struct apic apic_x2apic_savic __ro_after_init = {
.calc_dest_apicid = apic_default_calc_apicid,
+ .send_IPI = savic_send_ipi,
+ .send_IPI_mask = savic_send_ipi_mask,
+ .send_IPI_mask_allbutself = savic_send_ipi_mask_allbutself,
+ .send_IPI_allbutself = savic_send_ipi_allbutself,
+ .send_IPI_all = savic_send_ipi_all,
+ .send_IPI_self = savic_send_ipi_self,
+
.nmi_to_offline_cpu = true,
.read = savic_read,
.write = savic_write,
.eoi = native_apic_msr_eoi,
.icr_read = native_x2apic_icr_read,
- .icr_write = native_x2apic_icr_write,
+ .icr_write = savic_icr_write,
.update_vector = savic_update_vector,
};