Message ID | 20210422161130.652779-7-vkuznets@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | i386: KVM: expand Hyper-V features early | expand |
On Thu, Apr 22, 2021 at 06:11:17PM +0200, Vitaly Kuznetsov wrote: > As a preparation to implementing hv_cpuid_cache intro introduce > hv_cpuid_get_host(). No functional change intended. > > Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com> > --- > target/i386/kvm/kvm.c | 102 +++++++++++++++++++++++------------------- > 1 file changed, 57 insertions(+), 45 deletions(-) > > diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c > index ba093dba4d23..7aeb704b016e 100644 > --- a/target/i386/kvm/kvm.c > +++ b/target/i386/kvm/kvm.c > @@ -1107,6 +1107,19 @@ static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r) > return 0; > } > > +static uint32_t hv_cpuid_get_host(struct kvm_cpuid2 *cpuid, uint32_t func, > + int reg) > +{ > + struct kvm_cpuid_entry2 *entry; > + > + entry = cpuid_find_entry(cpuid, func, 0); > + if (!entry) { > + return 0; One possible difference here is that leaves not supported by the host will now be zeroed out. ...which I expected to be the correct behavior of hv-passthrough, so: Reviewed-by: Eduardo Habkost <ehabkost@redhat.com> > + } > + > + return cpuid_entry_get_reg(entry, reg); > +} > + > static bool hyperv_feature_supported(struct kvm_cpuid2 *cpuid, int feature) > { > uint32_t r, fw, bits; > @@ -1203,7 +1216,7 @@ static int hyperv_handle_properties(CPUState *cs, > { > X86CPU *cpu = X86_CPU(cs); > struct kvm_cpuid2 *cpuid; > - struct kvm_cpuid_entry2 *c, *c2; > + struct kvm_cpuid_entry2 *c; > uint32_t cpuid_i = 0; > int r; > > @@ -1235,46 +1248,47 @@ static int hyperv_handle_properties(CPUState *cs, > } > > if (cpu->hyperv_passthrough) { > - c = cpuid_find_entry(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, 0); > - if (c) { > - cpu->hyperv_vendor_id[0] = c->ebx; > - cpu->hyperv_vendor_id[1] = c->ecx; > - cpu->hyperv_vendor_id[2] = c->edx; > - cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor, > - sizeof(cpu->hyperv_vendor_id) + 1); > - memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id, > - sizeof(cpu->hyperv_vendor_id)); > - 
cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0; > - } > - > - c = cpuid_find_entry(cpuid, HV_CPUID_INTERFACE, 0); > - if (c) { > - cpu->hyperv_interface_id[0] = c->eax; > - cpu->hyperv_interface_id[1] = c->ebx; > - cpu->hyperv_interface_id[2] = c->ecx; > - cpu->hyperv_interface_id[3] = c->edx; > - } > - > - c = cpuid_find_entry(cpuid, HV_CPUID_VERSION, 0); > - if (c) { > - cpu->hyperv_version_id[0] = c->eax; > - cpu->hyperv_version_id[1] = c->ebx; > - cpu->hyperv_version_id[2] = c->ecx; > - cpu->hyperv_version_id[3] = c->edx; > - } > - > - c = cpuid_find_entry(cpuid, HV_CPUID_IMPLEMENT_LIMITS, 0); > - if (c) { > - cpu->hv_max_vps = c->eax; > - cpu->hyperv_limits[0] = c->ebx; > - cpu->hyperv_limits[1] = c->ecx; > - cpu->hyperv_limits[2] = c->edx; > - } > - > - c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0); > - if (c) { > - cpu->hyperv_spinlock_attempts = c->ebx; > - } > + cpu->hyperv_vendor_id[0] = > + hv_cpuid_get_host(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX); > + cpu->hyperv_vendor_id[1] = > + hv_cpuid_get_host(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX); > + cpu->hyperv_vendor_id[2] = > + hv_cpuid_get_host(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX); > + cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor, > + sizeof(cpu->hyperv_vendor_id) + 1); > + memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id, > + sizeof(cpu->hyperv_vendor_id)); > + cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0; > + > + cpu->hyperv_interface_id[0] = > + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_EAX); > + cpu->hyperv_interface_id[1] = > + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_EBX); > + cpu->hyperv_interface_id[2] = > + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_ECX); > + cpu->hyperv_interface_id[3] = > + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_EDX); > + > + cpu->hyperv_version_id[0] = > + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_EAX); > + cpu->hyperv_version_id[1] = > + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_EBX); > 
+ cpu->hyperv_version_id[2] = > + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_ECX); > + cpu->hyperv_version_id[3] = > + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_EDX); > + > + cpu->hv_max_vps = hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, > + R_EAX); > + cpu->hyperv_limits[0] = > + hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, R_EBX); > + cpu->hyperv_limits[1] = > + hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, R_ECX); > + cpu->hyperv_limits[2] = > + hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, R_EDX); > + > + cpu->hyperv_spinlock_attempts = > + hv_cpuid_get_host(cpuid, HV_CPUID_ENLIGHTMENT_INFO, R_EBX); > } > > /* Features */ > @@ -1348,10 +1362,8 @@ static int hyperv_handle_properties(CPUState *cs, > if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) { > c->eax |= HV_NO_NONARCH_CORESHARING; > } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) { > - c2 = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0); > - if (c2) { > - c->eax |= c2->eax & HV_NO_NONARCH_CORESHARING; > - } > + c->eax |= hv_cpuid_get_host(cpuid, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) & > + HV_NO_NONARCH_CORESHARING; > } > > c = &cpuid_ent[cpuid_i++]; > -- > 2.30.2 >
Eduardo Habkost <ehabkost@redhat.com> writes: > On Thu, Apr 22, 2021 at 06:11:17PM +0200, Vitaly Kuznetsov wrote: >> As a preparation to implementing hv_cpuid_cache intro introduce >> hv_cpuid_get_host(). No functional change intended. >> >> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com> >> --- >> target/i386/kvm/kvm.c | 102 +++++++++++++++++++++++------------------- >> 1 file changed, 57 insertions(+), 45 deletions(-) >> >> diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c >> index ba093dba4d23..7aeb704b016e 100644 >> --- a/target/i386/kvm/kvm.c >> +++ b/target/i386/kvm/kvm.c >> @@ -1107,6 +1107,19 @@ static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r) >> return 0; >> } >> >> +static uint32_t hv_cpuid_get_host(struct kvm_cpuid2 *cpuid, uint32_t func, >> + int reg) >> +{ >> + struct kvm_cpuid_entry2 *entry; >> + >> + entry = cpuid_find_entry(cpuid, func, 0); >> + if (!entry) { >> + return 0; > > One possible difference here is that leaves not supported by the > host will now be zeroed out. > > ...which I expected to be the correct behavior of hv-passthrough, Yes, at least that's the intention. > so: > > Reviewed-by: Eduardo Habkost <ehabkost@redhat.com> > Thanks! 
> >> + } >> + >> + return cpuid_entry_get_reg(entry, reg); >> +} >> + >> static bool hyperv_feature_supported(struct kvm_cpuid2 *cpuid, int feature) >> { >> uint32_t r, fw, bits; >> @@ -1203,7 +1216,7 @@ static int hyperv_handle_properties(CPUState *cs, >> { >> X86CPU *cpu = X86_CPU(cs); >> struct kvm_cpuid2 *cpuid; >> - struct kvm_cpuid_entry2 *c, *c2; >> + struct kvm_cpuid_entry2 *c; >> uint32_t cpuid_i = 0; >> int r; >> >> @@ -1235,46 +1248,47 @@ static int hyperv_handle_properties(CPUState *cs, >> } >> >> if (cpu->hyperv_passthrough) { >> - c = cpuid_find_entry(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, 0); >> - if (c) { >> - cpu->hyperv_vendor_id[0] = c->ebx; >> - cpu->hyperv_vendor_id[1] = c->ecx; >> - cpu->hyperv_vendor_id[2] = c->edx; >> - cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor, >> - sizeof(cpu->hyperv_vendor_id) + 1); >> - memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id, >> - sizeof(cpu->hyperv_vendor_id)); >> - cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0; >> - } >> - >> - c = cpuid_find_entry(cpuid, HV_CPUID_INTERFACE, 0); >> - if (c) { >> - cpu->hyperv_interface_id[0] = c->eax; >> - cpu->hyperv_interface_id[1] = c->ebx; >> - cpu->hyperv_interface_id[2] = c->ecx; >> - cpu->hyperv_interface_id[3] = c->edx; >> - } >> - >> - c = cpuid_find_entry(cpuid, HV_CPUID_VERSION, 0); >> - if (c) { >> - cpu->hyperv_version_id[0] = c->eax; >> - cpu->hyperv_version_id[1] = c->ebx; >> - cpu->hyperv_version_id[2] = c->ecx; >> - cpu->hyperv_version_id[3] = c->edx; >> - } >> - >> - c = cpuid_find_entry(cpuid, HV_CPUID_IMPLEMENT_LIMITS, 0); >> - if (c) { >> - cpu->hv_max_vps = c->eax; >> - cpu->hyperv_limits[0] = c->ebx; >> - cpu->hyperv_limits[1] = c->ecx; >> - cpu->hyperv_limits[2] = c->edx; >> - } >> - >> - c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0); >> - if (c) { >> - cpu->hyperv_spinlock_attempts = c->ebx; >> - } >> + cpu->hyperv_vendor_id[0] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX); >> + 
cpu->hyperv_vendor_id[1] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX); >> + cpu->hyperv_vendor_id[2] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX); >> + cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor, >> + sizeof(cpu->hyperv_vendor_id) + 1); >> + memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id, >> + sizeof(cpu->hyperv_vendor_id)); >> + cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0; >> + >> + cpu->hyperv_interface_id[0] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_EAX); >> + cpu->hyperv_interface_id[1] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_EBX); >> + cpu->hyperv_interface_id[2] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_ECX); >> + cpu->hyperv_interface_id[3] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_EDX); >> + >> + cpu->hyperv_version_id[0] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_EAX); >> + cpu->hyperv_version_id[1] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_EBX); >> + cpu->hyperv_version_id[2] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_ECX); >> + cpu->hyperv_version_id[3] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_EDX); >> + >> + cpu->hv_max_vps = hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, >> + R_EAX); >> + cpu->hyperv_limits[0] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, R_EBX); >> + cpu->hyperv_limits[1] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, R_ECX); >> + cpu->hyperv_limits[2] = >> + hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, R_EDX); >> + >> + cpu->hyperv_spinlock_attempts = >> + hv_cpuid_get_host(cpuid, HV_CPUID_ENLIGHTMENT_INFO, R_EBX); >> } >> >> /* Features */ >> @@ -1348,10 +1362,8 @@ static int hyperv_handle_properties(CPUState *cs, >> if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) { >> c->eax |= HV_NO_NONARCH_CORESHARING; >> } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) { >> - c2 = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0); 
>> - if (c2) { >> - c->eax |= c2->eax & HV_NO_NONARCH_CORESHARING; >> - } >> + c->eax |= hv_cpuid_get_host(cpuid, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) & >> + HV_NO_NONARCH_CORESHARING; >> } >> >> c = &cpuid_ent[cpuid_i++]; >> -- >> 2.30.2 >>
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index ba093dba4d23..7aeb704b016e 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -1107,6 +1107,19 @@ static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r) return 0; } +static uint32_t hv_cpuid_get_host(struct kvm_cpuid2 *cpuid, uint32_t func, + int reg) +{ + struct kvm_cpuid_entry2 *entry; + + entry = cpuid_find_entry(cpuid, func, 0); + if (!entry) { + return 0; + } + + return cpuid_entry_get_reg(entry, reg); +} + static bool hyperv_feature_supported(struct kvm_cpuid2 *cpuid, int feature) { uint32_t r, fw, bits; @@ -1203,7 +1216,7 @@ static int hyperv_handle_properties(CPUState *cs, { X86CPU *cpu = X86_CPU(cs); struct kvm_cpuid2 *cpuid; - struct kvm_cpuid_entry2 *c, *c2; + struct kvm_cpuid_entry2 *c; uint32_t cpuid_i = 0; int r; @@ -1235,46 +1248,47 @@ static int hyperv_handle_properties(CPUState *cs, } if (cpu->hyperv_passthrough) { - c = cpuid_find_entry(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, 0); - if (c) { - cpu->hyperv_vendor_id[0] = c->ebx; - cpu->hyperv_vendor_id[1] = c->ecx; - cpu->hyperv_vendor_id[2] = c->edx; - cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor, - sizeof(cpu->hyperv_vendor_id) + 1); - memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id, - sizeof(cpu->hyperv_vendor_id)); - cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0; - } - - c = cpuid_find_entry(cpuid, HV_CPUID_INTERFACE, 0); - if (c) { - cpu->hyperv_interface_id[0] = c->eax; - cpu->hyperv_interface_id[1] = c->ebx; - cpu->hyperv_interface_id[2] = c->ecx; - cpu->hyperv_interface_id[3] = c->edx; - } - - c = cpuid_find_entry(cpuid, HV_CPUID_VERSION, 0); - if (c) { - cpu->hyperv_version_id[0] = c->eax; - cpu->hyperv_version_id[1] = c->ebx; - cpu->hyperv_version_id[2] = c->ecx; - cpu->hyperv_version_id[3] = c->edx; - } - - c = cpuid_find_entry(cpuid, HV_CPUID_IMPLEMENT_LIMITS, 0); - if (c) { - cpu->hv_max_vps = c->eax; - cpu->hyperv_limits[0] = c->ebx; - cpu->hyperv_limits[1] = c->ecx; - 
cpu->hyperv_limits[2] = c->edx; - } - - c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0); - if (c) { - cpu->hyperv_spinlock_attempts = c->ebx; - } + cpu->hyperv_vendor_id[0] = + hv_cpuid_get_host(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EBX); + cpu->hyperv_vendor_id[1] = + hv_cpuid_get_host(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_ECX); + cpu->hyperv_vendor_id[2] = + hv_cpuid_get_host(cpuid, HV_CPUID_VENDOR_AND_MAX_FUNCTIONS, R_EDX); + cpu->hyperv_vendor = g_realloc(cpu->hyperv_vendor, + sizeof(cpu->hyperv_vendor_id) + 1); + memcpy(cpu->hyperv_vendor, cpu->hyperv_vendor_id, + sizeof(cpu->hyperv_vendor_id)); + cpu->hyperv_vendor[sizeof(cpu->hyperv_vendor_id)] = 0; + + cpu->hyperv_interface_id[0] = + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_EAX); + cpu->hyperv_interface_id[1] = + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_EBX); + cpu->hyperv_interface_id[2] = + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_ECX); + cpu->hyperv_interface_id[3] = + hv_cpuid_get_host(cpuid, HV_CPUID_INTERFACE, R_EDX); + + cpu->hyperv_version_id[0] = + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_EAX); + cpu->hyperv_version_id[1] = + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_EBX); + cpu->hyperv_version_id[2] = + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_ECX); + cpu->hyperv_version_id[3] = + hv_cpuid_get_host(cpuid, HV_CPUID_VERSION, R_EDX); + + cpu->hv_max_vps = hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, + R_EAX); + cpu->hyperv_limits[0] = + hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, R_EBX); + cpu->hyperv_limits[1] = + hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, R_ECX); + cpu->hyperv_limits[2] = + hv_cpuid_get_host(cpuid, HV_CPUID_IMPLEMENT_LIMITS, R_EDX); + + cpu->hyperv_spinlock_attempts = + hv_cpuid_get_host(cpuid, HV_CPUID_ENLIGHTMENT_INFO, R_EBX); } /* Features */ @@ -1348,10 +1362,8 @@ static int hyperv_handle_properties(CPUState *cs, if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) { c->eax |= HV_NO_NONARCH_CORESHARING; } 
else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) { - c2 = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0); - if (c2) { - c->eax |= c2->eax & HV_NO_NONARCH_CORESHARING; - } + c->eax |= hv_cpuid_get_host(cpuid, HV_CPUID_ENLIGHTMENT_INFO, R_EAX) & + HV_NO_NONARCH_CORESHARING; } c = &cpuid_ent[cpuid_i++];
As a preparation to implementing hv_cpuid_cache, introduce hv_cpuid_get_host(). No functional change intended. Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com> --- target/i386/kvm/kvm.c | 102 +++++++++++++++++++++++------------------- 1 file changed, 57 insertions(+), 45 deletions(-)