[24/30] nVMX: Correct handling of idt vectoring info

Message ID 201105080827.p488RW9O018328@rice.haifa.ibm.com

Commit Message

Nadav Har'El May 8, 2011, 8:27 a.m. UTC
This patch adds correct handling of IDT_VECTORING_INFO_FIELD for the nested
case.

When a guest exits while delivering an interrupt or exception, we get this
information in IDT_VECTORING_INFO_FIELD in the VMCS. When L2 exits to L1,
there is nothing we need to do, because L1 will see this field in vmcs12 and
handle it itself. However, when L2 exits and L0 handles the exit itself and
plans to return to L2, L0 must re-inject this event into L2.

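Schematically, for an exit from L2 that has a valid IDT_VECTORING_INFO_FIELD
(a sketch only; the condition name below is illustrative, not code from this
series):

	if (exit_is_reflected_to_l1) {
		/* nothing to do: L1 sees the field in vmcs12 and
		 * performs the re-injection itself */
	} else {
		/* L0 handled the exit and is resuming L2: L0 must
		 * re-inject the event through VM_ENTRY_INTR_INFO_FIELD,
		 * as done in the patch below */
	}
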
In the normal, non-nested case, idt_vectoring_info is read after the exit,
and the decision whether to inject the event (though not the injection
itself) is made at that point. However, in the nested case the decision
whether to return to L2 or to L1 is also made during the injection phase
(see the previous patches), so in the nested case we can only decide what
to do about the idt_vectoring_info right after the injection, i.e., at the
beginning of vmx_vcpu_run, which is the first point where we know for sure
that we are staying in L2 (i.e., that is_guest_mode(vcpu) is true).

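For example (a worked value per the VM-entry interruption-information layout
in the Intel SDM; not taken from the patch itself), re-injecting a page fault
(#PF) whose delivery was interrupted by the exit would assemble:

	vector             = 14  -> bits 7:0  = 0x0e
	type               =  3  -> bits 10:8 = 0x300  (hardware exception)
	deliver error code       -> bit  11   = 0x800
	valid                    -> bit  31   = 0x80000000

	VM_ENTRY_INTR_INFO_FIELD = 0x80000b0e
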
Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
---
 arch/x86/kvm/vmx.c |   32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

Comments

Avi Kivity May 9, 2011, 11:04 a.m. UTC | #1
On 05/08/2011 11:27 AM, Nadav Har'El wrote:
> +static void nested_handle_valid_idt_vectoring_info(struct vcpu_vmx *vmx)
> +{
> +	int irq  = vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
> +	int type = vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
> +	int errCodeValid = vmx->idt_vectoring_info &
> +		VECTORING_INFO_DELIVER_CODE_MASK;

Innovative coding style.

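Presumably a reference to the camelCase errCodeValid, which kernel style
would spell along these lines (illustrative rename, not from the thread):

	int err_code_valid = vmx->idt_vectoring_info &
			     VECTORING_INFO_DELIVER_CODE_MASK;
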
> +	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
> +		irq | type | INTR_INFO_VALID_MASK | errCodeValid);
> +

Why not do a 1:1 copy?

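A 1:1 copy would look roughly like this (a sketch, not from the thread):
IDT_VECTORING_INFO_FIELD and VM_ENTRY_INTR_INFO_FIELD share the same layout
for the vector, type, deliver-error-code and valid bits, so the exit value
could be written back wholesale, clearing only the bits that are reserved on
VM entry (INTR_INFO_RESVD_BITS_MASK covers bits 30:12):

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
		     vmx->idt_vectoring_info & ~INTR_INFO_RESVD_BITS_MASK);
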
> +	vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
> +		vmx->nested.vm_exit_instruction_len);
> +	if (errCodeValid)
> +		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
> +			vmx->nested.idt_vectoring_error_code);
> +}
> +
>   #ifdef CONFIG_X86_64
>   #define R "r"
>   #define Q "q"

Patch

--- .before/arch/x86/kvm/vmx.c	2011-05-08 10:43:21.000000000 +0300
+++ .after/arch/x86/kvm/vmx.c	2011-05-08 10:43:21.000000000 +0300
@@ -352,6 +352,10 @@  struct nested_vmx {
 	u64 vmcs01_tsc_offset;
 	/* L2 must run next, and mustn't decide to exit to L1. */
 	bool nested_run_pending;
+	/* true if last exit was of L2, and had a valid idt_vectoring_info */
+	bool valid_idt_vectoring_info;
+	/* These are saved if valid_idt_vectoring_info */
+	u32 vm_exit_instruction_len, idt_vectoring_error_code;
 	/*
 	 * Guest pages referred to in vmcs02 with host-physical pointers, so
 	 * we must keep them pinned while L2 runs.
@@ -5736,6 +5740,22 @@  static void vmx_cancel_injection(struct 
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
 }
 
+static void nested_handle_valid_idt_vectoring_info(struct vcpu_vmx *vmx)
+{
+	int irq  = vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
+	int type = vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
+	int errCodeValid = vmx->idt_vectoring_info &
+		VECTORING_INFO_DELIVER_CODE_MASK;
+	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+		irq | type | INTR_INFO_VALID_MASK | errCodeValid);
+
+	vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+		vmx->nested.vm_exit_instruction_len);
+	if (errCodeValid)
+		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
+			vmx->nested.idt_vectoring_error_code);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #define Q "q"
@@ -5748,6 +5768,9 @@  static void __noclone vmx_vcpu_run(struc
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+	if (is_guest_mode(vcpu) && vmx->nested.valid_idt_vectoring_info)
+		nested_handle_valid_idt_vectoring_info(vmx);
+
 	/* Record the guest's net vcpu time for enforced NMI injections. */
 	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
 		vmx->entry_time = ktime_get();
@@ -5879,6 +5902,15 @@  static void __noclone vmx_vcpu_run(struc
 
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
+	vmx->nested.valid_idt_vectoring_info = is_guest_mode(vcpu) &&
+		(vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
+	if (vmx->nested.valid_idt_vectoring_info) {
+		vmx->nested.vm_exit_instruction_len =
+			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+		vmx->nested.idt_vectoring_error_code =
+			vmcs_read32(IDT_VECTORING_ERROR_CODE);
+	}
+
 	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 	vmx->launched = 1;