@@ -159,9 +159,13 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u64 avic_physical_id; /* Offset 0xf8 */
u8 reserved_7[8];
u64 vmsa_pa; /* Used for an SEV-ES guest */
- u8 reserved_8[40];
+ u8 reserved_8[36];
+ u8 update_irr; /* Offset 0x134 */
+ u8 reserved_9[3];
u64 allowed_sev_features; /* Offset 0x138 */
- u8 reserved_9[672];
+ u8 reserved_10[16];
+ u32 requested_irr[8]; /* Offset 0x150 */
+ u8 reserved_11[624];
/*
* Offset 0x3e0, 32 bytes reserved
* for use by hypervisor/software.
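The new fields are carved out of the existing reserved space: update_irr sits at offset 0x134, requested_irr at 0x150, and the trailing pad still ends at the 0x3e0 software area. A hedged compile-time sanity check (not part of the patch; it only assumes the kernel's static_assert() and offsetof() helpers) would catch any miscount in the reserved_* arrays:

/*
 * Hypothetical layout checks, not part of the patch above: verify that the
 * new fields land at the architecturally defined offsets.
 */
static_assert(offsetof(struct vmcb_control_area, update_irr) == 0x134);
static_assert(offsetof(struct vmcb_control_area, allowed_sev_features) == 0x138);
static_assert(offsetof(struct vmcb_control_area, requested_irr) == 0x150);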
@@ -47,6 +47,9 @@ static int pending_userspace_extint(struct kvm_vcpu *v)
*/
int kvm_cpu_has_extint(struct kvm_vcpu *v)
{
+ if (v->arch.apic->guest_apic_protected)
+ return 0;
+
/*
* FIXME: interrupt.injected represents an interrupt whose
* side-effects have already been applied (e.g. bit from IRR
@@ -2975,11 +2975,16 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
if (!kvm_apic_present(vcpu))
return -1;
- if (apic->guest_apic_protected)
+ switch (apic->guest_apic_protected) {
+ case APIC_STATE_PROTECTED_POSTED_INTR:
return -1;
-
- __apic_update_ppr(apic, &ppr);
- return apic_has_interrupt_for_ppr(apic, ppr);
+ case APIC_STATE_PROTECTED_INJECTED_INTR:
+ return apic_search_irr(apic);
+ case APIC_STATE_UNPROTECTED:
+ default:
+ __apic_update_ppr(apic, &ppr);
+ return apic_has_interrupt_for_ppr(apic, ppr);
+ }
}
EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
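kvm_apic_has_interrupt() now distinguishes three APIC protection states, whose definition is not shown in this excerpt. A minimal sketch of the assumed enum (the tag name is hypothetical), with APIC_STATE_UNPROTECTED equal to zero so the plain truth test added to kvm_cpu_has_extint() above only trips for the protected cases:

/* Assumed definition; the enum tag and comments are illustrative only. */
enum kvm_apic_protection_state {
	APIC_STATE_UNPROTECTED = 0,		/* legacy PPR-based scan */
	APIC_STATE_PROTECTED_POSTED_INTR,	/* IRR not visible to KVM */
	APIC_STATE_PROTECTED_INJECTED_INTR,	/* KVM shadows IRR, HW injects */
};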
@@ -34,6 +34,7 @@
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"
+#include "lapic.h"
#define GHCB_VERSION_MAX 2ULL
#define GHCB_VERSION_DEFAULT 2ULL
@@ -4986,3 +4987,41 @@ int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
return level;
}
+
+void sev_savic_set_requested_irr(struct vcpu_svm *svm, bool reinjected)
+{
+ struct kvm_lapic *apic;
+ bool has_interrupts;
+ int vec, vec_pos;
+ u32 val;
+ int i;
+
+ /* Secure AVIC HW takes care of re-injection */
+ if (reinjected)
+ return;
+
+ apic = svm->vcpu.arch.apic;
+ has_interrupts = false;
+
+ for (i = 0; i < ARRAY_SIZE(svm->vmcb->control.requested_irr); i++) {
+ val = __kvm_lapic_get_reg(apic->regs, APIC_IRR + i * 0x10);
+ if (!val)
+ continue;
+ has_interrupts = true;
+ svm->vmcb->control.requested_irr[i] |= val;
+ do {
+ vec_pos = __ffs(val);
+ vec = (i << 5) + vec_pos;
+ kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
+ val &= ~BIT(vec_pos);
+ } while (val);
+ }
+
+ if (has_interrupts)
+ svm->vmcb->control.update_irr |= BIT(0);
+}
+
+bool sev_savic_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+ return kvm_apic_has_interrupt(vcpu) != -1;
+}
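sev_savic_set_requested_irr() walks the eight 32-bit IRR registers (spaced 0x10 apart in the APIC register page), copies every pending bit into the matching requested_irr word and clears it from the shadow IRR. The vector arithmetic it relies on, as a standalone illustration (not part of the patch):

/* Illustration only: vector <-> (word, bit) mapping, matching (i << 5) + vec_pos. */
static inline void savic_mark_requested_vector(u32 *requested_irr, u8 vec)
{
	requested_irr[vec >> 5] |= BIT(vec & 0x1f);
}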
@@ -50,6 +50,8 @@
#include "svm.h"
#include "svm_ops.h"
+#include "lapic.h"
+
#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"
@@ -3679,6 +3681,9 @@ static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
struct vcpu_svm *svm = to_svm(vcpu);
u32 type;
+ if (sev_savic_active(vcpu->kvm))
+ return sev_savic_set_requested_irr(svm, reinjected);
+
if (vcpu->arch.interrupt.soft) {
if (svm_update_soft_interrupt_rip(vcpu))
return;
@@ -3860,6 +3865,9 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (sev_savic_active(vcpu->kvm))
+ return 1;
+
if (svm->nested.nested_run_pending)
return -EBUSY;
@@ -3880,6 +3888,9 @@ static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ if (sev_savic_active(vcpu->kvm))
+ return;
+
/*
* In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
* 1, because that's a separate STGI/VMRUN intercept. The next time we
@@ -5092,6 +5103,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.apicv_post_state_restore = avic_apicv_post_state_restore,
.required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,
+ .protected_apic_has_interrupt = sev_savic_has_pending_interrupt,
+
.get_exit_info = svm_get_exit_info,
.get_entry_info = svm_get_entry_info,
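The new .protected_apic_has_interrupt hook is consumed on the common x86 side when the vCPU's APIC is protected; a rough sketch of the expected caller (the exact irq.c call site and its use of the kvm_x86_call() wrapper are assumptions, as that code is not part of this excerpt):

/* Assumed caller shape, not shown in this excerpt. */
int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
	if (lapic_in_kernel(v) && v->arch.apic->guest_apic_protected)
		return kvm_x86_call(protected_apic_has_interrupt)(v);

	return kvm_cpu_has_extint(v) || kvm_apic_has_interrupt(v) != -1;
}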
@@ -760,6 +760,8 @@ static inline bool sev_savic_active(struct kvm *kvm)
{
return to_kvm_sev_info(kvm)->vmsa_features & SVM_SEV_FEAT_SECURE_AVIC;
}
+void sev_savic_set_requested_irr(struct vcpu_svm *svm, bool reinjected);
+bool sev_savic_has_pending_interrupt(struct kvm_vcpu *vcpu);
#else
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{
@@ -791,7 +793,8 @@ static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
return 0;
}
static inline bool sev_savic_active(struct kvm *kvm) { return false; }
-
+static inline void sev_savic_set_requested_irr(struct vcpu_svm *svm, bool reinjected) {}
+static inline bool sev_savic_has_pending_interrupt(struct kvm_vcpu *vcpu) { return false; }
#endif
/* vmenter.S */
@@ -10426,7 +10426,17 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
if (r < 0)
goto out;
if (r) {
- int irq = kvm_cpu_get_interrupt(vcpu);
+ int irq;
+
+ /*
+ * For APIC_STATE_PROTECTED_INJECTED_INTR, only query the pending vector here:
+ * ->inject_irq reads APIC_IRR and clears it, so acking here would lose the vector.
+ */
+ if (vcpu->arch.apic->guest_apic_protected ==
+ APIC_STATE_PROTECTED_INJECTED_INTR)
+ irq = kvm_apic_has_interrupt(vcpu);
+ else
+ irq = kvm_cpu_get_interrupt(vcpu);
if (!WARN_ON_ONCE(irq == -1)) {
kvm_queue_interrupt(vcpu, irq, false);