@@ -418,7 +418,7 @@ static void vt_cancel_injection(struct kvm_vcpu *vcpu)
static int vt_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
if (is_td_vcpu(vcpu))
- return true;
+ return tdx_interrupt_allowed(vcpu);
return vmx_interrupt_allowed(vcpu, for_injection);
}
@@ -203,7 +203,8 @@ void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
return;
if (kvm_vcpu_is_blocking(vcpu) &&
- (is_td_vcpu(vcpu) || !vmx_interrupt_blocked(vcpu)))
+ ((is_td_vcpu(vcpu) && tdx_interrupt_allowed(vcpu)) ||
+ (!is_td_vcpu(vcpu) && !vmx_interrupt_blocked(vcpu))))
pi_enable_wakeup_handler(vcpu);
/*
@@ -720,9 +720,39 @@ void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
local_irq_enable();
}
+bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ /*
+ * KVM can't get the interrupt status of a TDX guest, so it assumes
+ * interrupts are always allowed unless the TDX guest calls TDVMCALL
+ * with HLT, which passes the interrupt-blocked flag.
+ */
+ return vmx_get_exit_reason(vcpu).basic != EXIT_REASON_HLT ||
+ !to_tdx(vcpu)->vp_enter_args.r12;
+}
+
bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
- return pi_has_pending_interrupt(vcpu);
+ u64 vcpu_state_details;
+
+ if (pi_has_pending_interrupt(vcpu))
+ return true;
+
+ /*
+ * Only check RVI pending for the HALTED case with IRQs enabled.
+ * For non-HLT cases, KVM doesn't care about STI/SS shadows. And if an
+ * interrupt was pending before TD exit, then it _must_ be blocked,
+ * otherwise the interrupt would have been serviced at the instruction
+ * boundary.
+ */
+ if (vmx_get_exit_reason(vcpu).basic != EXIT_REASON_HLT ||
+ to_tdx(vcpu)->vp_enter_args.r12)
+ return false;
+
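+ /*
+ * VCPU_STATE_DETAILS is a non-architectural TDVPS field maintained by
+ * the TDX module; bit 0 (INTR_PENDING) reports whether an interrupt is
+ * pending for the halted vCPU.
+ */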
+ vcpu_state_details =
+ td_state_non_arch_read64(to_tdx(vcpu), TD_VCPU_STATE_DETAILS_NON_ARCH);
+
+ return tdx_vcpu_state_details_intr_pending(vcpu_state_details);
}
/*
@@ -846,6 +876,7 @@ static __always_inline u32 tdcall_to_vmx_exit_reason(struct kvm_vcpu *vcpu)
{
switch (tdvmcall_leaf(vcpu)) {
case EXIT_REASON_CPUID:
+ case EXIT_REASON_HLT:
case EXIT_REASON_IO_INSTRUCTION:
return tdvmcall_leaf(vcpu);
case EXIT_REASON_EPT_VIOLATION:
@@ -1103,9 +1134,7 @@ static int tdx_complete_vmcall_map_gpa(struct kvm_vcpu *vcpu)
/*
* Stop processing the remaining part if there is a pending interrupt,
* which could be qualified to deliver. Skip checking pending RVI for
- * TDVMCALL_MAP_GPA.
- * TODO: Add a comment to link the reason when the target function is
- * implemented.
+ * TDVMCALL_MAP_GPA; see the comments in tdx_protected_apic_has_interrupt().
*/
if (kvm_vcpu_has_events(vcpu)) {
tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_RETRY);
@@ -1908,6 +1937,8 @@ int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
return 1;
case EXIT_REASON_CPUID:
return tdx_emulate_cpuid(vcpu);
+ case EXIT_REASON_HLT:
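+ /*
+ * KVM can't advance a TD's RIP, so emulate HLT with the
+ * no-skip variant.
+ */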
+ return kvm_emulate_halt_noskip(vcpu);
case EXIT_REASON_TDCALL:
return handle_tdvmcall(vcpu);
case EXIT_REASON_VMCALL:
@@ -121,6 +121,7 @@ static __always_inline void tdvps_vmcs_check(u32 field, u8 bits)
}
static __always_inline void tdvps_management_check(u64 field, u8 bits) {}
+static __always_inline void tdvps_state_non_arch_check(u64 field, u8 bits) {}
#define TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass) \
static __always_inline u##bits td_##lclass##_read##bits(struct vcpu_tdx *tdx, \
@@ -168,11 +169,15 @@ static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx, \
tdh_vp_wr_failed(tdx, #uclass, " &= ~", field, bit, err);\
}
+
+bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu);
+
TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(8, MANAGEMENT, management);
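+/* Generates td_state_non_arch_read64() etc. for TD_VCPU_STATE_DETAILS_NON_ARCH. */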
+TDX_BUILD_TDVPS_ACCESSORS(64, STATE_NON_ARCH, state_non_arch);
#else
static inline int tdx_bringup(void) { return 0; }
@@ -188,6 +193,8 @@ struct vcpu_tdx {
struct kvm_vcpu vcpu;
};
+static inline bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu) { return false; }
+
#endif
#endif
@@ -36,6 +36,17 @@ enum tdx_tdcs_execution_control {
TD_TDCS_EXEC_TSC_OFFSET = 10,
};
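+/* Non-architectural vCPU guest state fields maintained by the TDX module. */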
+enum tdx_vcpu_guest_other_state {
+ TD_VCPU_STATE_DETAILS_NON_ARCH = 0x100,
+};
+
+#define TDX_VCPU_STATE_DETAILS_INTR_PENDING BIT_ULL(0)
+
+static inline bool tdx_vcpu_state_details_intr_pending(u64 vcpu_state_details)
+{
+ return !!(vcpu_state_details & TDX_VCPU_STATE_DETAILS_INTR_PENDING);
+}
+
/* @field is any of enum tdx_tdcs_execution_control */
#define TDCS_EXEC(field) BUILD_TDX_FIELD(TD_CLASS_EXECUTION_CONTROLS, (field))