@@ -135,6 +135,8 @@
#define APIC_TDR_DIV_128 0xA
#define APIC_EFEAT 0x400
#define APIC_ECTRL 0x410
+#define APIC_SEOI 0x420 /* Specific EOI register */
+#define APIC_IER 0x480 /* Interrupt Enable registers */
#define APIC_EILVTn(n) (0x500 + 0x10 * n)
#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
#define APIC_EILVT_NR_AMD_10H 4
@@ -9,6 +9,7 @@
#include <linux/cc_platform.h>
#include <linux/percpu-defs.h>
+#include <linux/align.h>
#include <asm/apic.h>
#include <asm/sev.h>
@@ -32,6 +33,117 @@ static int savic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return x2apic_enabled() && cc_platform_has(CC_ATTR_SNP_SECURE_AVIC);
}
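+/*
+ * Helpers to access APIC registers in this CPU's Secure AVIC backing
+ * page. Register offsets are byte offsets and each register is 32 bits
+ * wide, so "offset >> 2" is the u32 index into the regs[] array.
+ */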
+static __always_inline u32 get_reg(unsigned int offset)
+{
+ return READ_ONCE(this_cpu_ptr(apic_page)->regs[offset >> 2]);
+}
+
+static __always_inline void set_reg(unsigned int offset, u32 val)
+{
+ WRITE_ONCE(this_cpu_ptr(apic_page)->regs[offset >> 2], val);
+}
+
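+/*
+ * Offset of the first ALLOWED_IRR register in the Secure AVIC backing
+ * page. ALLOWED_IRR registers are spaced 16 bytes apart, mirroring the
+ * IRR layout, and hold the vectors which the guest allows the
+ * hypervisor to inject.
+ */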
+#define SAVIC_ALLOWED_IRR 0x204
+
+static u32 savic_read(u32 reg)
+{
+ /*
+ * When Secure AVIC is enabled, rdmsr/wrmsr of APIC registers
+ * results in a #VC exception (for non-accelerated register
+ * accesses) with the VMEXIT_AVIC_NOACCEL error code. The #VC
+ * exception handler could then read/write the x2APIC register
+ * in the guest APIC backing page, but that would add latency to
+ * every APIC register access. So, instead of doing rdmsr/wrmsr
+ * based accesses and handling the register reads/writes in the
+ * #VC handler, the read() and write() callbacks access the APIC
+ * registers directly from/to the vCPU's APIC backing page.
+ */
+ switch (reg) {
+ case APIC_LVTT:
+ case APIC_TMICT:
+ case APIC_TMCCT:
+ case APIC_TDCR:
+ case APIC_ID:
+ case APIC_LVR:
+ case APIC_TASKPRI:
+ case APIC_ARBPRI:
+ case APIC_PROCPRI:
+ case APIC_LDR:
+ case APIC_SPIV:
+ case APIC_ESR:
+ case APIC_ICR:
+ case APIC_LVTTHMR:
+ case APIC_LVTPC:
+ case APIC_LVT0:
+ case APIC_LVT1:
+ case APIC_LVTERR:
+ case APIC_EFEAT:
+ case APIC_ECTRL:
+ case APIC_SEOI:
+ case APIC_IER:
+ case APIC_EILVTn(0) ... APIC_EILVTn(3):
+ return get_reg(reg);
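+ /* ISR and TMR offset range: one 32-bit register every 16 bytes */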
+ case APIC_ISR ... APIC_ISR + 0x70:
+ case APIC_TMR ... APIC_TMR + 0x70:
+ if (WARN_ONCE(!IS_ALIGNED(reg, 16),
+ "APIC reg read offset 0x%x not aligned at 16 bytes", reg))
+ return 0;
+ return get_reg(reg);
+ /* IRR and ALLOWED_IRR offset range */
+ case APIC_IRR ... APIC_IRR + 0x74:
+ /*
+ * Either aligned at 16 bytes for valid IRR reg offset or a
+ * valid Secure AVIC ALLOWED_IRR offset.
+ */
+ if (WARN_ONCE(!(IS_ALIGNED(reg, 16) ||
+ IS_ALIGNED(reg - SAVIC_ALLOWED_IRR, 16)),
+ "Misaligned IRR/ALLOWED_IRR APIC reg read offset 0x%x", reg))
+ return 0;
+ return get_reg(reg);
+ default:
+ pr_err("Permission denied: read of Secure AVIC reg offset 0x%x\n", reg);
+ return 0;
+ }
+}
+
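+/*
+ * Offset of the Secure AVIC NMI_REQ register in the APIC backing page,
+ * used to request NMI delivery for the vCPU.
+ */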
+#define SAVIC_NMI_REQ 0x278
+
+static void savic_write(u32 reg, u32 data)
+{
+ switch (reg) {
+ case APIC_LVTT:
+ case APIC_LVT0:
+ case APIC_LVT1:
+ case APIC_TMICT:
+ case APIC_TDCR:
+ case APIC_SELF_IPI:
+ case APIC_TASKPRI:
+ case APIC_EOI:
+ case APIC_SPIV:
+ case SAVIC_NMI_REQ:
+ case APIC_ESR:
+ case APIC_ICR:
+ case APIC_LVTTHMR:
+ case APIC_LVTPC:
+ case APIC_LVTERR:
+ case APIC_ECTRL:
+ case APIC_SEOI:
+ case APIC_IER:
+ case APIC_EILVTn(0) ... APIC_EILVTn(3):
+ set_reg(reg, data);
+ break;
+ /* ALLOWED_IRR offsets are writable */
+ case SAVIC_ALLOWED_IRR ... SAVIC_ALLOWED_IRR + 0x70:
+ if (IS_ALIGNED(reg - SAVIC_ALLOWED_IRR, 16)) {
+ set_reg(reg, data);
+ break;
+ }
+ fallthrough;
+ default:
+ pr_err("Permission denied: write to Secure AVIC reg offset 0x%x\n", reg);
+ }
+}
+
static void savic_setup(void)
{
void *backing_page;
@@ -95,8 +207,8 @@ static struct apic apic_x2apic_savic __ro_after_init = {
.nmi_to_offline_cpu = true,
- .read = native_apic_msr_read,
- .write = native_apic_msr_write,
+ .read = savic_read,
+ .write = savic_write,
.eoi = native_apic_msr_eoi,
.icr_read = native_x2apic_icr_read,
.icr_write = native_x2apic_icr_write,