@@ -1523,7 +1523,7 @@ static int svm_handle_osvw(struct vcpu *v, uint32_t msr, uint64_t *val, bool_t r
return 0;
}
-static int _svm_cpu_up(bool bsp)
+static int svm_cpu_up(bool bsp)
{
uint64_t msr_content;
int rc;
@@ -1538,7 +1538,7 @@ static int _svm_cpu_up(bool bsp)
return -EINVAL;
}
- if ( (rc = svm_cpu_up_prepare(cpu)) != 0 )
+ if ( bsp && (rc = svm_cpu_up_prepare(cpu)) != 0 )
return rc;
write_efer(read_efer() | EFER_SVME);
@@ -1578,18 +1578,13 @@ static int _svm_cpu_up(bool bsp)
return 0;
}
-static int svm_cpu_up(void)
-{
- return _svm_cpu_up(false);
-}
-
const struct hvm_function_table * __init start_svm(void)
{
bool_t printed = 0;
svm_host_osvw_reset();
- if ( _svm_cpu_up(true) )
+ if ( svm_cpu_up(true) )
{
printk("SVM: failed to initialise.\n");
return NULL;
@@ -603,7 +603,7 @@ void vmx_cpu_dead(unsigned int cpu)
vmx_pi_desc_fixup(cpu);
}
-int vmx_cpu_up(void)
+int vmx_cpu_up(bool bsp)
{
u32 eax, edx;
int rc, bios_locked, cpu = smp_processor_id();
@@ -652,7 +652,7 @@ int vmx_cpu_up(void)
INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
- if ( (rc = vmx_cpu_up_prepare(cpu)) != 0 )
+ if ( bsp && (rc = vmx_cpu_up_prepare(cpu)) != 0 )
return rc;
switch ( __vmxon(this_cpu(vmxon_region)) )
@@ -2433,7 +2433,7 @@ const struct hvm_function_table * __init start_vmx(void)
{
set_in_cr4(X86_CR4_VMXE);
- if ( vmx_cpu_up() )
+ if ( vmx_cpu_up(true) )
{
printk("VMX: failed to initialise.\n");
return NULL;
@@ -158,7 +158,7 @@ struct hvm_function_table {
int (*cpu_up_prepare)(unsigned int cpu);
void (*cpu_dead)(unsigned int cpu);
- int (*cpu_up)(void);
+ int (*cpu_up)(bool bsp);
void (*cpu_down)(void);
/* Copy up to 15 bytes from cached instruction bytes at current rIP. */
@@ -443,7 +443,7 @@ void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);
static inline int hvm_cpu_up(void)
{
- return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 0);
+ return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up(false) : 0);
}
static inline void hvm_cpu_down(void)
@@ -25,7 +25,7 @@ extern void vmcs_dump_vcpu(struct vcpu *v);
extern void setup_vmcs_dump(void);
extern int vmx_cpu_up_prepare(unsigned int cpu);
extern void vmx_cpu_dead(unsigned int cpu);
-extern int vmx_cpu_up(void);
+extern int vmx_cpu_up(bool bsp);
extern void vmx_cpu_down(void);
struct vmcs_struct {
These routines are first called by the BSP via the CPU_UP_PREPARE notifier
and then again by the booting AP from vmx_cpu_up()/_svm_cpu_up(). Avoid the
unnecessary second call. Because the BSP doesn't go through CPU_UP_PREPARE
it is a special case, so we pass a 'bsp' flag to hvm_funcs.cpu_up() to help
it decide whether or not to call vmx/svm_cpu_up_prepare().

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
V3:
 * Reverted to a v1-style patch, without the assumption that the BSP is
   always CPU0. The problem with v2 was that on Intel vmx_cpu_up_prepare()
   needs to be called after vmx_init_vmcs_config(): the latter sets
   vmcs_revision_id, which is used later by vmx_alloc_vmcs().

 xen/arch/x86/hvm/svm/svm.c         | 11 +++--------
 xen/arch/x86/hvm/vmx/vmcs.c        |  4 ++--
 xen/arch/x86/hvm/vmx/vmx.c         |  2 +-
 xen/include/asm-x86/hvm/hvm.h      |  4 ++--
 xen/include/asm-x86/hvm/vmx/vmcs.h |  2 +-
 5 files changed, 9 insertions(+), 14 deletions(-)
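
For illustration only (not part of the patch), here is a minimal standalone
sketch of the calling convention the 'bsp' flag establishes. The names are
hypothetical: cpu_up_prepare() stands in for vmx/svm_cpu_up_prepare(),
cpu_up() for the hvm_funcs.cpu_up(bsp) hook, and the CPU_UP_PREPARE notifier
is reduced to a direct call.

/* Sketch, not Xen code: each CPU's prepare step runs exactly once. */
#include <stdbool.h>
#include <stdio.h>

static int prepare_calls[2];                  /* per-CPU call counter for the demo */

static int cpu_up_prepare(unsigned int cpu)   /* stand-in for vmx/svm_cpu_up_prepare() */
{
    prepare_calls[cpu]++;
    return 0;
}

static int cpu_up(unsigned int cpu, bool bsp) /* stand-in for hvm_funcs.cpu_up(bsp) */
{
    int rc;

    /* Only the BSP prepares here; APs were already prepared via CPU_UP_PREPARE. */
    if ( bsp && (rc = cpu_up_prepare(cpu)) != 0 )
        return rc;

    /* ... enable VMX/SVM on this CPU ... */
    return 0;
}

int main(void)
{
    cpu_up(0, true);      /* BSP: never sees CPU_UP_PREPARE, so it prepares here   */

    cpu_up_prepare(1);    /* AP: prepared via the CPU_UP_PREPARE notifier          */
    cpu_up(1, false);     /* AP: hvm_cpu_up() passes false, no second prepare call */

    printf("cpu0 prepared %d time(s), cpu1 prepared %d time(s)\n",
           prepare_calls[0], prepare_calls[1]);
    return 0;
}

Before the patch, the AP path would call the prepare routine a second time
from inside vmx_cpu_up()/_svm_cpu_up(); gating that call on 'bsp' leaves
every CPU prepared exactly once.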