diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6442,29 +6442,16 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 				      vmx->loaded_vmcs->entry_time));
 }
 
-static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
-				      u32 idt_vectoring_info,
-				      int instr_len_field,
-				      int error_code_field)
+void vmx_process_injected_event(struct kvm_vcpu *vcpu,
+				u32 idt_vectoring_info,
+				u32 instr_len,
+				u32 error_code)
 {
-	u8 vector;
-	int type;
-	bool idtv_info_valid;
-
-	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
-
-	vcpu->arch.nmi_injected = false;
-	kvm_clear_exception_queue(vcpu);
-	kvm_clear_interrupt_queue(vcpu);
-
-	if (!idtv_info_valid)
-		return;
+	u8 vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
+	u32 type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
-	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
-	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
-
 	switch (type) {
 	case INTR_TYPE_NMI_INTR:
 		vcpu->arch.nmi_injected = true;
@@ -6476,17 +6463,16 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
 		vmx_set_nmi_mask(vcpu, false);
 		break;
 	case INTR_TYPE_SOFT_EXCEPTION:
-		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
+		vcpu->arch.event_exit_inst_len = instr_len;
 		fallthrough;
 	case INTR_TYPE_HARD_EXCEPTION:
 		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
-			u32 err = vmcs_read32(error_code_field);
-			kvm_requeue_exception_e(vcpu, vector, err);
+			kvm_requeue_exception_e(vcpu, vector, error_code);
 		} else
 			kvm_requeue_exception(vcpu, vector);
 		break;
 	case INTR_TYPE_SOFT_INTR:
-		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
+		vcpu->arch.event_exit_inst_len = instr_len;
 		fallthrough;
 	case INTR_TYPE_EXT_INTR:
 		kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
@@ -6496,6 +6482,33 @@ static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
 	}
 }
 
+static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
+				      u32 idt_vectoring_info,
+				      int instr_len_field,
+				      int error_code_field)
+{
+	u32 instr_len = 0, err_code = 0;
+	u32 type;
+
+	vcpu->arch.nmi_injected = false;
+	kvm_clear_exception_queue(vcpu);
+	kvm_clear_interrupt_queue(vcpu);
+
+	if (!(idt_vectoring_info & VECTORING_INFO_VALID_MASK))
+		return;
+
+	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
+
+	if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK)
+		err_code = vmcs_read32(error_code_field);
+
+	if (type == INTR_TYPE_SOFT_EXCEPTION || type == INTR_TYPE_SOFT_INTR)
+		instr_len = vmcs_read32(instr_len_field);
+
+	vmx_process_injected_event(vcpu, idt_vectoring_info, instr_len,
+				   err_code);
+}
+
 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 {
 	__vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -336,6 +336,10 @@ bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
 bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
 void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
+void vmx_process_injected_event(struct kvm_vcpu *vcpu,
+				u32 idt_vectoring_info,
+				u32 instr_len,
+				u32 error_code);
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);

Refactor the logic that parses an injected event into a separate
function. This will be used in the next patch to handle the events
that L1 wants to inject into L2 in a way that survives migration.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 arch/x86/kvm/vmx/vmx.c | 59 ++++++++++++++++++++++++++----------------
 arch/x86/kvm/vmx/vmx.h |  4 ++++
 2 files changed, 40 insertions(+), 23 deletions(-)
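
For context, a minimal sketch of how a later patch in this series
might reuse the new helper on the nested-injection path. This is an
illustration under stated assumptions, not the actual follow-up patch:
the wrapper name nested_vmx_save_pending_event and its call site are
hypothetical, while vm_entry_intr_info_field, vm_entry_instruction_len
and vm_entry_exception_error_code are the existing vmcs12 fields that
describe the event L1 is injecting into L2.

	/*
	 * Hypothetical sketch, not part of this patch: route the event
	 * that L1 is injecting into L2 through vmx_process_injected_event()
	 * so it lands in KVM's exception/interrupt queues, which are
	 * already part of the vCPU state preserved across migration.
	 */
	static void nested_vmx_save_pending_event(struct kvm_vcpu *vcpu,
						  struct vmcs12 *vmcs12)
	{
		u32 intr_info = vmcs12->vm_entry_intr_info_field;

		if (!(intr_info & VECTORING_INFO_VALID_MASK))
			return;

		vmx_process_injected_event(vcpu, intr_info,
					   vmcs12->vm_entry_instruction_len,
					   vmcs12->vm_entry_exception_error_code);
	}

Because vmx_process_injected_event() no longer reads the VMCS itself,
a caller like the one above can feed it values taken from vmcs12 (or
from saved migration state) instead of the current VMCS fields, which
is the decoupling the commit message anticipates.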