@@ -240,6 +240,15 @@ static bool vt_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
return vmx_apic_init_signal_blocked(vcpu);
}

+static void vt_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+{
+ /* Only x2APIC mode is supported for TD. */
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_virtual_apic_mode(vcpu);
+}
+
static void vt_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi = vcpu_to_pi_desc(vcpu);
@@ -248,6 +257,15 @@ static void vt_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
memset(pi->pir, 0, sizeof(pi->pir));
}

+static void vt_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
+{
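+ /* The vAPIC page of a TD is managed by the TDX module, not KVM. */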
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_hwapic_isr_update(vcpu, max_isr);
+}
+
static int vt_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
if (is_td_vcpu(vcpu))
@@ -437,6 +455,15 @@ static void vt_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
vmx_get_exit_info(vcpu, reason, info1, info2, intr_info, error_code);
}

+static void vt_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
+{
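+ /* Only x2APIC is supported for TD, so there is no APIC-access page. */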
+ if (is_td_vcpu(vcpu))
+ return;
+
+ vmx_set_apic_access_page_addr(vcpu);
+}
+
static void vt_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
if (is_td_vcpu(vcpu)) {
@@ -561,13 +588,13 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.update_cr8_intercept = vmx_update_cr8_intercept,

.x2apic_icr_is_split = false,
- .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
- .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
+ .set_virtual_apic_mode = vt_set_virtual_apic_mode,
+ .set_apic_access_page_addr = vt_set_apic_access_page_addr,
.refresh_apicv_exec_ctrl = vt_refresh_apicv_exec_ctrl,
.load_eoi_exitmap = vmx_load_eoi_exitmap,
.apicv_pre_state_restore = vt_apicv_pre_state_restore,
.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
- .hwapic_isr_update = vmx_hwapic_isr_update,
+ .hwapic_isr_update = vt_hwapic_isr_update,
.sync_pir_to_irr = vt_sync_pir_to_irr,
.deliver_interrupt = vt_deliver_interrupt,
.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,