Message ID | 1483163161-2402-8-git-send-email-suravee.suthikulpanit@amd.com (mailing list archive)
State      | New, archived
On 31/12/2016 05:45, Suravee Suthikulpanit wrote:
> diff --git a/xen/arch/x86/hvm/svm/avic.c b/xen/arch/x86/hvm/svm/avic.c
> index c0b7151..6351c8e 100644
> --- a/xen/arch/x86/hvm/svm/avic.c
> +++ b/xen/arch/x86/hvm/svm/avic.c
> @@ -73,6 +73,79 @@ avic_get_phy_apic_id_ent(const struct vcpu *v, unsigned int index)
>      return &avic_phy_apic_id_table[index];
>  }
>
> +static void avic_vcpu_load(struct vcpu *v)
> +{
> +    struct arch_svm_struct *s = &v->arch.hvm_svm;
> +    int h_phy_apic_id;
> +    struct avic_phy_apic_id_ent entry;
> +
> +    if ( !s->avic_last_phy_id )
> +        return;

What is the purpose of this check?  The VM should uniformly be using
AVIC or not.

> +
> +    if ( test_bit(_VPF_blocked, &v->pause_flags) )
> +        return;

This had better never be true if the scheduler has decided to context
switch into v's state.

~Andrew
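Read together, the two comments suggest that both early returns could become assertions, since AVIC is either uniformly in use for a domain or not. A minimal sketch of that reading (not what the series actually does; it borrows svm_avic_vcpu_enabled() and the other identifiers from the patch itself):

static void avic_vcpu_load(struct vcpu *v)
{
    struct arch_svm_struct *s = &v->arch.hvm_svm;
    int h_phy_apic_id;
    struct avic_phy_apic_id_ent entry;

    /* AVIC is a per-domain property, so these can be invariants. */
    ASSERT(svm_avic_vcpu_enabled(v));
    ASSERT(s->avic_last_phy_id);

    /* The scheduler never context switches into a blocked vcpu. */
    ASSERT(!test_bit(_VPF_blocked, &v->pause_flags));

    h_phy_apic_id = cpu_data[v->processor].apicid;
    ASSERT(h_phy_apic_id < AVIC_PHY_APIC_ID_MAX);

    entry = *(s->avic_last_phy_id);
    entry.host_phy_apic_id = h_phy_apic_id;
    entry.is_running = 1;
    *(s->avic_last_phy_id) = entry;
}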
On 12/31/2016 12:45 AM, Suravee Suthikulpanit wrote:
> Add hooks to manage AVIC data structure during vcpu scheduling.
>
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> Cc: Jan Beulich <JBeulich@suse.com>
> Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> ---
>  xen/arch/x86/hvm/svm/avic.c | 78 +++++++++++++++++++++++++++++++++++++++++++++
>  xen/arch/x86/hvm/svm/svm.c  | 10 ++++++
>  2 files changed, 88 insertions(+)
>
> diff --git a/xen/arch/x86/hvm/svm/avic.c b/xen/arch/x86/hvm/svm/avic.c
> index c0b7151..6351c8e 100644
> --- a/xen/arch/x86/hvm/svm/avic.c
> +++ b/xen/arch/x86/hvm/svm/avic.c
> @@ -73,6 +73,79 @@ avic_get_phy_apic_id_ent(const struct vcpu *v, unsigned int index)
>      return &avic_phy_apic_id_table[index];
>  }
>
> +static void avic_vcpu_load(struct vcpu *v)
> +{
> +    struct arch_svm_struct *s = &v->arch.hvm_svm;
> +    int h_phy_apic_id;
> +    struct avic_phy_apic_id_ent entry;
> +
> +    if ( !s->avic_last_phy_id )
> +        return;
> +
> +    if ( test_bit(_VPF_blocked, &v->pause_flags) )
> +        return;
> +
> +    /*
> +     * Note: APIC ID = 0xff is used for broadcast.
> +     *       APIC ID > 0xff is reserved.
> +     */
> +    h_phy_apic_id = cpu_data[v->processor].apicid;
> +    ASSERT(h_phy_apic_id < AVIC_PHY_APIC_ID_MAX);
> +
> +    entry = *(s->avic_last_phy_id);
> +    smp_rmb();
> +    entry.host_phy_apic_id = h_phy_apic_id;
> +    entry.is_running = 1;
> +    *(s->avic_last_phy_id) = entry;
> +    smp_wmb();
> +}
> +
> +static void avic_vcpu_unload(struct vcpu *v)
> +{
> +    struct arch_svm_struct *s = &v->arch.hvm_svm;
> +    struct avic_phy_apic_id_ent entry;
> +
> +    if ( !svm_avic || !s->avic_last_phy_id )
> +        return;
> +
> +    entry = *(s->avic_last_phy_id);
> +    smp_rmb();
> +    entry.is_running = 0;
> +    *(s->avic_last_phy_id) = entry;
> +    smp_wmb();
> +}
> +
> +static void avic_vcpu_resume(struct vcpu *v)
> +{
> +    struct avic_phy_apic_id_ent entry;
> +    struct arch_svm_struct *s = &v->arch.hvm_svm;
> +
> +    ASSERT(svm_avic_vcpu_enabled(v));
> +    ASSERT(s->avic_last_phy_id);
> +    ASSERT(!test_bit(_VPF_blocked, &v->pause_flags));
> +
> +    entry = *(s->avic_last_phy_id);
> +    smp_rmb();
> +    entry.is_running = 1;
> +    *(s->avic_last_phy_id) = entry;
> +    smp_wmb();
> +}
> +
> +static void avic_vcpu_block(struct vcpu *v)
> +{
> +    struct avic_phy_apic_id_ent entry;
> +    struct arch_svm_struct *s = &v->arch.hvm_svm;
> +
> +    ASSERT(svm_avic_vcpu_enabled(v));
> +    ASSERT(s->avic_last_phy_id);
> +
> +    entry = *(s->avic_last_phy_id);
> +    smp_rmb();
> +    entry.is_running = 0;
> +    *(s->avic_last_phy_id) = entry;
> +    smp_wmb();
> +}

I don't understand the use of r/w barriers here (and I think Andrew had
a similar comment), but in any case the last 5 lines can be factored out.

>  int svm_avic_dom_init(struct domain *d)
>  {
>      int ret = 0;
> @@ -127,6 +200,11 @@ int svm_avic_dom_init(struct domain *d)
>
>      spin_lock_init(&d->arch.hvm_domain.svm.avic_ldr_mode_lock);
>
> +    d->arch.hvm_domain.pi_ops.pi_switch_to = avic_vcpu_unload;
> +    d->arch.hvm_domain.pi_ops.pi_switch_from = avic_vcpu_load;
> +    d->arch.hvm_domain.pi_ops.vcpu_block = avic_vcpu_block;
> +    d->arch.hvm_domain.pi_ops.pi_do_resume = avic_vcpu_resume;
> +

I wonder whether it might be better to declare a (static) svm_pi_ops
structure and assign it here to d->arch.hvm_domain.pi_ops.  And make a
similar change in patch 1 for VMX.

-boris
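Both suggestions are mechanical. A hypothetical sketch of what they could look like, assuming a struct pi_ops type behind d->arch.hvm_domain.pi_ops (the type name is not shown in this thread) and an illustrative name for the factored-out helper (barriers omitted here, per the question above):

/* Factor out the repeated read/modify/write of the is_running bit. */
static void avic_update_is_running(const struct vcpu *v, bool is_running)
{
    struct avic_phy_apic_id_ent entry = *(v->arch.hvm_svm.avic_last_phy_id);

    entry.is_running = is_running;
    *(v->arch.hvm_svm.avic_last_phy_id) = entry;
}

/* Assign all hooks in one go instead of field by field. */
static const struct pi_ops svm_pi_ops = {
    .pi_switch_to   = avic_vcpu_unload,
    .pi_switch_from = avic_vcpu_load,
    .vcpu_block     = avic_vcpu_block,
    .pi_do_resume   = avic_vcpu_resume,
};

/* ... and in svm_avic_dom_init(): */
    d->arch.hvm_domain.pi_ops = svm_pi_ops;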
diff --git a/xen/arch/x86/hvm/svm/avic.c b/xen/arch/x86/hvm/svm/avic.c
index c0b7151..6351c8e 100644
--- a/xen/arch/x86/hvm/svm/avic.c
+++ b/xen/arch/x86/hvm/svm/avic.c
@@ -73,6 +73,79 @@ avic_get_phy_apic_id_ent(const struct vcpu *v, unsigned int index)
     return &avic_phy_apic_id_table[index];
 }
 
+static void avic_vcpu_load(struct vcpu *v)
+{
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+    int h_phy_apic_id;
+    struct avic_phy_apic_id_ent entry;
+
+    if ( !s->avic_last_phy_id )
+        return;
+
+    if ( test_bit(_VPF_blocked, &v->pause_flags) )
+        return;
+
+    /*
+     * Note: APIC ID = 0xff is used for broadcast.
+     *       APIC ID > 0xff is reserved.
+     */
+    h_phy_apic_id = cpu_data[v->processor].apicid;
+    ASSERT(h_phy_apic_id < AVIC_PHY_APIC_ID_MAX);
+
+    entry = *(s->avic_last_phy_id);
+    smp_rmb();
+    entry.host_phy_apic_id = h_phy_apic_id;
+    entry.is_running = 1;
+    *(s->avic_last_phy_id) = entry;
+    smp_wmb();
+}
+
+static void avic_vcpu_unload(struct vcpu *v)
+{
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+    struct avic_phy_apic_id_ent entry;
+
+    if ( !svm_avic || !s->avic_last_phy_id )
+        return;
+
+    entry = *(s->avic_last_phy_id);
+    smp_rmb();
+    entry.is_running = 0;
+    *(s->avic_last_phy_id) = entry;
+    smp_wmb();
+}
+
+static void avic_vcpu_resume(struct vcpu *v)
+{
+    struct avic_phy_apic_id_ent entry;
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+    ASSERT(svm_avic_vcpu_enabled(v));
+    ASSERT(s->avic_last_phy_id);
+    ASSERT(!test_bit(_VPF_blocked, &v->pause_flags));
+
+    entry = *(s->avic_last_phy_id);
+    smp_rmb();
+    entry.is_running = 1;
+    *(s->avic_last_phy_id) = entry;
+    smp_wmb();
+}
+
+static void avic_vcpu_block(struct vcpu *v)
+{
+    struct avic_phy_apic_id_ent entry;
+    struct arch_svm_struct *s = &v->arch.hvm_svm;
+
+    ASSERT(svm_avic_vcpu_enabled(v));
+    ASSERT(s->avic_last_phy_id);
+
+    entry = *(s->avic_last_phy_id);
+    smp_rmb();
+    entry.is_running = 0;
+    *(s->avic_last_phy_id) = entry;
+    smp_wmb();
+}
+
 int svm_avic_dom_init(struct domain *d)
 {
     int ret = 0;
@@ -127,6 +200,11 @@ int svm_avic_dom_init(struct domain *d)
 
     spin_lock_init(&d->arch.hvm_domain.svm.avic_ldr_mode_lock);
 
+    d->arch.hvm_domain.pi_ops.pi_switch_to = avic_vcpu_unload;
+    d->arch.hvm_domain.pi_ops.pi_switch_from = avic_vcpu_load;
+    d->arch.hvm_domain.pi_ops.vcpu_block = avic_vcpu_block;
+    d->arch.hvm_domain.pi_ops.pi_do_resume = avic_vcpu_resume;
+
     return ret;
 
 err_out:
     svm_avic_dom_destroy(d);
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index cb5281c..df59b8d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1012,6 +1012,10 @@ static void svm_ctxt_switch_from(struct vcpu *v)
     svm_tsc_ratio_save(v);
 
     svm_sync_vmcb(v);
+
+    if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_from )
+        v->domain->arch.hvm_domain.pi_ops.pi_switch_from(v);
+
     svm_vmload(per_cpu(root_vmcb, cpu));
 
     /* Resume use of ISTs now that the host TR is reinstated. */
@@ -1048,6 +1052,9 @@ static void svm_ctxt_switch_to(struct vcpu *v)
     svm_lwp_load(v);
     svm_tsc_ratio_load(v);
 
+    if ( v->domain->arch.hvm_domain.pi_ops.pi_switch_to )
+        v->domain->arch.hvm_domain.pi_ops.pi_switch_to(v);
+
     if ( cpu_has_rdtscp )
         wrmsrl(MSR_TSC_AUX, hvm_msr_tsc_aux(v));
 }
@@ -1093,6 +1100,9 @@ static void noreturn svm_do_resume(struct vcpu *v)
         vmcb_set_vintr(vmcb, intr);
     }
 
+    if ( v->domain->arch.hvm_domain.pi_ops.pi_do_resume )
+        v->domain->arch.hvm_domain.pi_ops.pi_do_resume(v);
+
     hvm_do_resume(v);
 
     reset_stack_and_jump(svm_asm_do_resume);
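On the barrier question raised in both reviews: the physical APIC ID table entry is presumably read concurrently by the AVIC hardware, so the update really wants to be a single 64-bit store rather than a structure copy bracketed by smp_rmb()/smp_wmb(). A minimal sketch of that alternative, assuming struct avic_phy_apic_id_ent is exactly 64 bits wide and that Xen's read_atomic()/write_atomic() helpers are the right primitives here:

static void avic_set_is_running(struct avic_phy_apic_id_ent *ent, bool run)
{
    union {
        struct avic_phy_apic_id_ent ent;
        uint64_t raw;
    } e;

    /* Assumption: the table entry is a single 64-bit word. */
    BUILD_BUG_ON(sizeof(struct avic_phy_apic_id_ent) != sizeof(uint64_t));

    e.raw = read_atomic((uint64_t *)ent);
    e.ent.is_running = run;
    write_atomic((uint64_t *)ent, e.raw);
}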
Add hooks to manage AVIC data structure during vcpu scheduling.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
 xen/arch/x86/hvm/svm/avic.c | 78 +++++++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c  | 10 ++++++
 2 files changed, 88 insertions(+)