Message ID | 20170706230323.29952-2-bsd@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
> /*
>  * The exit handlers return 1 if the exit was handled fully and guest execution
>  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
> @@ -7790,6 +7806,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
> 	[EXIT_REASON_XSAVES]                  = handle_xsaves,
> 	[EXIT_REASON_XRSTORS]                 = handle_xrstors,
> 	[EXIT_REASON_PML_FULL]                = handle_pml_full,
> +	[EXIT_REASON_VMFUNC]                  = handle_vmfunc,
> 	[EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
> };
>
> @@ -8111,6 +8128,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
> 	case EXIT_REASON_PML_FULL:
> 		/* We emulate PML support to L1. */
> 		return false;
> +	case EXIT_REASON_VMFUNC:
> +		/* VM functions are emulated through L2->L0 vmexits. */
> +		return false;

This would fit better into the second patch.

> 	default:
> 		return true;
> 	}
>

Looks good to me.
On 10/07/2017 10:54, David Hildenbrand wrote:
>
>> /*
>>  * The exit handlers return 1 if the exit was handled fully and guest execution
>>  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
>> @@ -7790,6 +7806,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
>> 	[EXIT_REASON_XSAVES]                  = handle_xsaves,
>> 	[EXIT_REASON_XRSTORS]                 = handle_xrstors,
>> 	[EXIT_REASON_PML_FULL]                = handle_pml_full,
>> +	[EXIT_REASON_VMFUNC]                  = handle_vmfunc,
>> 	[EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
>> };
>>
>> @@ -8111,6 +8128,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
>> 	case EXIT_REASON_PML_FULL:
>> 		/* We emulate PML support to L1. */
>> 		return false;
>> +	case EXIT_REASON_VMFUNC:
>> +		/* VM functions are emulated through L2->L0 vmexits. */
>> +		return false;
>
> This would fit better into the second patch.

It depends on how you reason about it.  I put it here because:

- until this patch, EXIT_REASON_VMFUNC should never be generated.  We
don't even know that it exists.

- after this patch, it should still never be generated in nested
scenarios.  However, if it did because of a bug, we're in a better place
to handle it than L1 (because as far as L1 knows, it should never be
generated).

Perhaps this is an argument in favor of changing the default case of
nested_vmx_exit_handled from true to false.

Paolo

>> 	default:
>> 		return true;
>> 	}
>>
>
> Looks good to me.
>
On 10.07.2017 11:17, Paolo Bonzini wrote:
>
>
> On 10/07/2017 10:54, David Hildenbrand wrote:
>>
>>> /*
>>>  * The exit handlers return 1 if the exit was handled fully and guest execution
>>>  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
>>> @@ -7790,6 +7806,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
>>> 	[EXIT_REASON_XSAVES]                  = handle_xsaves,
>>> 	[EXIT_REASON_XRSTORS]                 = handle_xrstors,
>>> 	[EXIT_REASON_PML_FULL]                = handle_pml_full,
>>> +	[EXIT_REASON_VMFUNC]                  = handle_vmfunc,
>>> 	[EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
>>> };
>>>
>>> @@ -8111,6 +8128,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
>>> 	case EXIT_REASON_PML_FULL:
>>> 		/* We emulate PML support to L1. */
>>> 		return false;
>>> +	case EXIT_REASON_VMFUNC:
>>> +		/* VM functions are emulated through L2->L0 vmexits. */
>>> +		return false;
>>
>> This would fit better into the second patch.
>
> It depends on how you reason about it.  I put it here because:
>
> - until this patch, EXIT_REASON_VMFUNC should never be generated.  We
> don't even know that it exists.
>
> - after this patch, it should still never be generated in nested
> scenarios.  However, if it did because of a bug, we're in a better place
> to handle it than L1 (because as far as L1 knows, it should never be
> generated).
>
> Perhaps this is an argument in favor of changing the default case of
> nested_vmx_exit_handled from true to false.

I remember having the same discussion before :) I still think the
default should be changed (then we don't need nVMX hunks in VMX patches
;) ).

Anyhow

Reviewed-by: David Hildenbrand <david@redhat.com>

>
> Paolo
On 10/07/2017 11:20, David Hildenbrand wrote:
>> Perhaps this is an argument in favor of changing the default case of
>> nested_vmx_exit_handled from true to false.
>
> I remember having the same discussion before :) I still think the
> default should be changed (then we don't need nVMX hunks in VMX patches
> ;) ).

Yeah, that was my point.  Patches welcome! ;)

Paolo
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 35cd06f..da5375e 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -72,6 +72,7 @@
 #define SECONDARY_EXEC_PAUSE_LOOP_EXITING	0x00000400
 #define SECONDARY_EXEC_RDRAND			0x00000800
 #define SECONDARY_EXEC_ENABLE_INVPCID		0x00001000
+#define SECONDARY_EXEC_ENABLE_VMFUNC		0x00002000
 #define SECONDARY_EXEC_SHADOW_VMCS		0x00004000
 #define SECONDARY_EXEC_RDSEED			0x00010000
 #define SECONDARY_EXEC_ENABLE_PML		0x00020000
@@ -187,6 +188,8 @@ enum vmcs_field {
 	APIC_ACCESS_ADDR_HIGH           = 0x00002015,
 	POSTED_INTR_DESC_ADDR           = 0x00002016,
 	POSTED_INTR_DESC_ADDR_HIGH      = 0x00002017,
+	VM_FUNCTION_CONTROL             = 0x00002018,
+	VM_FUNCTION_CONTROL_HIGH        = 0x00002019,
 	EPT_POINTER                     = 0x0000201a,
 	EPT_POINTER_HIGH                = 0x0000201b,
 	EOI_EXIT_BITMAP0                = 0x0000201c,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ca5d2b9..a483b49 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1314,6 +1314,12 @@ static inline bool cpu_has_vmx_tsc_scaling(void)
 		SECONDARY_EXEC_TSC_SCALING;
 }
 
+static inline bool cpu_has_vmx_vmfunc(void)
+{
+	return vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_ENABLE_VMFUNC;
+}
+
 static inline bool report_flexpriority(void)
 {
 	return flexpriority_enabled;
@@ -3575,7 +3581,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 			SECONDARY_EXEC_SHADOW_VMCS |
 			SECONDARY_EXEC_XSAVES |
 			SECONDARY_EXEC_ENABLE_PML |
-			SECONDARY_EXEC_TSC_SCALING;
+			SECONDARY_EXEC_TSC_SCALING |
+			SECONDARY_EXEC_ENABLE_VMFUNC;
 		if (adjust_vmx_controls(min2, opt2,
 					MSR_IA32_VMX_PROCBASED_CTLS2,
 					&_cpu_based_2nd_exec_control) < 0)
@@ -5233,6 +5240,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
 #endif
 
+	if (cpu_has_vmx_vmfunc())
+		vmcs_write64(VM_FUNCTION_CONTROL, 0);
+
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
 	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
@@ -7740,6 +7750,12 @@ static int handle_preemption_timer(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int handle_vmfunc(struct kvm_vcpu *vcpu)
+{
+	kvm_queue_exception(vcpu, UD_VECTOR);
+	return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -7790,6 +7806,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_XSAVES]                  = handle_xsaves,
 	[EXIT_REASON_XRSTORS]                 = handle_xrstors,
 	[EXIT_REASON_PML_FULL]                = handle_pml_full,
+	[EXIT_REASON_VMFUNC]                  = handle_vmfunc,
 	[EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
 };
 
@@ -8111,6 +8128,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	case EXIT_REASON_PML_FULL:
 		/* We emulate PML support to L1. */
 		return false;
+	case EXIT_REASON_VMFUNC:
+		/* VM functions are emulated through L2->L0 vmexits. */
+		return false;
 	default:
 		return true;
 	}