Message ID | 20210930071326.857390-12-andr2000@gmail.com (mailing list archive) |
---|---|
State | Superseded |
Series | PCI devices passthrough on Arm, part 2 |
On 30.09.2021 09:13, Oleksandr Andrushchenko wrote:
> From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
>
> vPCI may map and unmap PCI device memory (BARs) being passed through which
> may take a lot of time. For this those operations may be deferred to be
> performed later, so that they can be safely preempted.
>
> Currently this deferred processing is happening in common IOREQ code
> which doesn't seem to be the right place for x86 and is even more
> doubtful because IOREQ may not be enabled for Arm at all.
> So, for Arm the pending vPCI work may have no chance to be executed
> if the processing is left as is in the common IOREQ code only.
> For that reason make vPCI processing happen in arch specific code.
>
> Please be aware that there are a few outstanding TODOs affecting this
> code path, see xen/drivers/vpci/header.c:map_range and
> xen/drivers/vpci/header.c:vpci_process_pending.
>
> Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
>
> ---
> Since v2:
> - update commit message with more insight on x86, IOREQ and Arm
> - restored order of invocation for IOREQ and vPCI processing (Jan)
> Since v1:
> - Moved the check for pending vpci work from the common IOREQ code
>   to hvm_do_resume on x86
> - Re-worked the code for Arm to ensure we don't miss pending vPCI work
> ---
>  xen/arch/arm/traps.c   | 13 +++++++++++++
>  xen/arch/x86/hvm/hvm.c |  6 ++++++

This x86 change
Acked-by: Jan Beulich <jbeulich@suse.com>

>  xen/common/ioreq.c     |  9 ---------

Already on v2 I did indicate that you need to Cc Paul to get an ack for
this part of the change.

> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -546,6 +546,12 @@ void hvm_do_resume(struct vcpu *v)
>
>      pt_restore_timer(v);
>
> +    if ( has_vpci(v->domain) && vpci_process_pending(v) )
> +    {
> +        raise_softirq(SCHEDULE_SOFTIRQ);
> +        return;
> +    }
> +
>      if ( !vcpu_ioreq_handle_completion(v) )
>          return;

Seeing this movement I'm now wondering whether we shouldn't take the
opportunity and move this further up.

I notice that besides Paul you also failed to Cc the other x86
maintainers, whom I'm now adding.

Jan

> --- a/xen/common/ioreq.c
> +++ b/xen/common/ioreq.c
> @@ -25,9 +25,7 @@
>  #include <xen/lib.h>
>  #include <xen/paging.h>
>  #include <xen/sched.h>
> -#include <xen/softirq.h>
>  #include <xen/trace.h>
> -#include <xen/vpci.h>
>
>  #include <asm/guest_atomics.h>
>  #include <asm/ioreq.h>
> @@ -212,19 +210,12 @@ static bool wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
>
>  bool vcpu_ioreq_handle_completion(struct vcpu *v)
>  {
> -    struct domain *d = v->domain;
>      struct vcpu_io *vio = &v->io;
>      struct ioreq_server *s;
>      struct ioreq_vcpu *sv;
>      enum vio_completion completion;
>      bool res = true;
>
> -    if ( has_vpci(d) && vpci_process_pending(v) )
> -    {
> -        raise_softirq(SCHEDULE_SOFTIRQ);
> -        return false;
> -    }
> -
>      while ( (sv = get_pending_vcpu(v, &s)) != NULL )
>          if ( !wait_for_io(sv, get_ioreq(s, v)) )
>              return false;
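For readers unfamiliar with the x86 flow discussed above: hvm_do_resume() runs on the path back into the guest, so when vpci_process_pending() reports leftover work, raising SCHEDULE_SOFTIRQ and returning early simply defers the remainder until the vCPU next passes through the same hook. A minimal, self-contained C sketch of that retry shape (hypothetical names throughout, not the actual Xen entry path) might look like:

/*
 * Illustrative sketch only -- hypothetical names, not the Xen x86 entry path.
 * Models the "raise a scheduling softirq and retry the resume hook" shape.
 */
#include <stdbool.h>
#include <stdio.h>

static bool schedule_softirq_raised;

/* Stand-in for vpci_process_pending(): true while deferred work remains. */
static bool vpci_work_pending(void)
{
    static int chunks_left = 2;

    if ( chunks_left == 0 )
        return false;
    chunks_left--;
    return true;
}

/* Stand-in for hvm_do_resume(). */
static void do_resume(void)
{
    if ( vpci_work_pending() )
    {
        schedule_softirq_raised = true;   /* request a scheduling point */
        return;                           /* skip IOREQ completion etc. for now */
    }

    printf("no vPCI work left, continue with the rest of resume\n");
}

int main(void)
{
    /* Stand-in for the VM-entry loop: re-run the hook while a softirq is pending. */
    do {
        schedule_softirq_raised = false;
        do_resume();
    } while ( schedule_softirq_raised );

    return 0;
}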
On Thu, 30 Sep 2021, Oleksandr Andrushchenko wrote:
> From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
>
> vPCI may map and unmap PCI device memory (BARs) being passed through which
> may take a lot of time. For this those operations may be deferred to be
> performed later, so that they can be safely preempted.
>
> Currently this deferred processing is happening in common IOREQ code
> which doesn't seem to be the right place for x86 and is even more
> doubtful because IOREQ may not be enabled for Arm at all.
> So, for Arm the pending vPCI work may have no chance to be executed
> if the processing is left as is in the common IOREQ code only.
> For that reason make vPCI processing happen in arch specific code.
>
> Please be aware that there are a few outstanding TODOs affecting this
> code path, see xen/drivers/vpci/header.c:map_range and
> xen/drivers/vpci/header.c:vpci_process_pending.
>
> Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

> ---
> Since v2:
> - update commit message with more insight on x86, IOREQ and Arm
> - restored order of invocation for IOREQ and vPCI processing (Jan)
> Since v1:
> - Moved the check for pending vpci work from the common IOREQ code
>   to hvm_do_resume on x86
> - Re-worked the code for Arm to ensure we don't miss pending vPCI work
> ---
>  xen/arch/arm/traps.c   | 13 +++++++++++++
>  xen/arch/x86/hvm/hvm.c |  6 ++++++
>  xen/common/ioreq.c     |  9 ---------
>  3 files changed, 19 insertions(+), 9 deletions(-)
>
> diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
> index 219ab3c3fbde..b246f51086e3 100644
> --- a/xen/arch/arm/traps.c
> +++ b/xen/arch/arm/traps.c
> @@ -34,6 +34,7 @@
>  #include <xen/symbols.h>
>  #include <xen/version.h>
>  #include <xen/virtual_region.h>
> +#include <xen/vpci.h>
>
>  #include <public/sched.h>
>  #include <public/xen.h>
> @@ -2304,6 +2305,18 @@ static bool check_for_vcpu_work(void)
>      }
>  #endif
>
> +    if ( has_vpci(v->domain) )
> +    {
> +        bool pending;
> +
> +        local_irq_enable();
> +        pending = vpci_process_pending(v);
> +        local_irq_disable();
> +
> +        if ( pending )
> +            return true;
> +    }
> +
>      if ( likely(!v->arch.need_flush_to_ram) )
>          return false;
>
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 7b48a1b925bb..38bf697c5b04 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -546,6 +546,12 @@ void hvm_do_resume(struct vcpu *v)
>
>      pt_restore_timer(v);
>
> +    if ( has_vpci(v->domain) && vpci_process_pending(v) )
> +    {
> +        raise_softirq(SCHEDULE_SOFTIRQ);
> +        return;
> +    }
> +
>      if ( !vcpu_ioreq_handle_completion(v) )
>          return;
>
> diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
> index d732dc045df9..689d256544c8 100644
> --- a/xen/common/ioreq.c
> +++ b/xen/common/ioreq.c
> @@ -25,9 +25,7 @@
>  #include <xen/lib.h>
>  #include <xen/paging.h>
>  #include <xen/sched.h>
> -#include <xen/softirq.h>
>  #include <xen/trace.h>
> -#include <xen/vpci.h>
>
>  #include <asm/guest_atomics.h>
>  #include <asm/ioreq.h>
> @@ -212,19 +210,12 @@ static bool wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
>
>  bool vcpu_ioreq_handle_completion(struct vcpu *v)
>  {
> -    struct domain *d = v->domain;
>      struct vcpu_io *vio = &v->io;
>      struct ioreq_server *s;
>      struct ioreq_vcpu *sv;
>      enum vio_completion completion;
>      bool res = true;
>
> -    if ( has_vpci(d) && vpci_process_pending(v) )
> -    {
> -        raise_softirq(SCHEDULE_SOFTIRQ);
> -        return false;
> -    }
> -
>      while ( (sv = get_pending_vcpu(v, &s)) != NULL )
>          if ( !wait_for_io(sv, get_ioreq(s, v)) )
>              return false;
> --
> 2.25.1
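On the Arm side quoted above, check_for_vcpu_work() is entered with interrupts disabled, so the hunk briefly re-enables them around the potentially long vpci_process_pending() call and reports true while work remains, assuming the caller keeps re-invoking the check before resuming the guest. A minimal sketch of that pattern (hypothetical names, not the actual traps.c code) could be:

/*
 * Illustrative sketch only -- hypothetical names, not the Arm traps.c code.
 * Shows the "re-enable IRQs around long work, then ask the caller to retry"
 * pattern used by the check_for_vcpu_work() hunk quoted above.
 */
#include <assert.h>
#include <stdbool.h>

static bool irqs_enabled;

static void irq_enable(void)  { irqs_enabled = true;  }
static void irq_disable(void) { irqs_enabled = false; }

/* Stand-in for vpci_process_pending(): true while deferred work remains. */
static bool process_pending_work(void)
{
    static int chunks_left = 3;

    assert(irqs_enabled);              /* the point of the enable/disable pair */
    if ( chunks_left == 0 )
        return false;
    chunks_left--;
    return true;
}

/* Stand-in for check_for_vcpu_work(): entered with IRQs disabled. */
static bool vcpu_has_more_work(void)
{
    bool pending;

    irq_enable();                      /* long-running work must not keep IRQs off */
    pending = process_pending_work();
    irq_disable();

    return pending;                    /* true => do not resume the guest yet */
}

int main(void)
{
    irq_disable();

    /* Stand-in for the exit-to-guest path: retry until no work is left. */
    while ( vcpu_has_more_work() )
        ;

    return 0;
}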
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 219ab3c3fbde..b246f51086e3 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -34,6 +34,7 @@
 #include <xen/symbols.h>
 #include <xen/version.h>
 #include <xen/virtual_region.h>
+#include <xen/vpci.h>
 
 #include <public/sched.h>
 #include <public/xen.h>
@@ -2304,6 +2305,18 @@ static bool check_for_vcpu_work(void)
     }
 #endif
 
+    if ( has_vpci(v->domain) )
+    {
+        bool pending;
+
+        local_irq_enable();
+        pending = vpci_process_pending(v);
+        local_irq_disable();
+
+        if ( pending )
+            return true;
+    }
+
     if ( likely(!v->arch.need_flush_to_ram) )
         return false;
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7b48a1b925bb..38bf697c5b04 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -546,6 +546,12 @@ void hvm_do_resume(struct vcpu *v)
 
     pt_restore_timer(v);
 
+    if ( has_vpci(v->domain) && vpci_process_pending(v) )
+    {
+        raise_softirq(SCHEDULE_SOFTIRQ);
+        return;
+    }
+
     if ( !vcpu_ioreq_handle_completion(v) )
         return;
 
diff --git a/xen/common/ioreq.c b/xen/common/ioreq.c
index d732dc045df9..689d256544c8 100644
--- a/xen/common/ioreq.c
+++ b/xen/common/ioreq.c
@@ -25,9 +25,7 @@
 #include <xen/lib.h>
 #include <xen/paging.h>
 #include <xen/sched.h>
-#include <xen/softirq.h>
 #include <xen/trace.h>
-#include <xen/vpci.h>
 
 #include <asm/guest_atomics.h>
 #include <asm/ioreq.h>
@@ -212,19 +210,12 @@ static bool wait_for_io(struct ioreq_vcpu *sv, ioreq_t *p)
 
 bool vcpu_ioreq_handle_completion(struct vcpu *v)
 {
-    struct domain *d = v->domain;
     struct vcpu_io *vio = &v->io;
     struct ioreq_server *s;
     struct ioreq_vcpu *sv;
     enum vio_completion completion;
     bool res = true;
 
-    if ( has_vpci(d) && vpci_process_pending(v) )
-    {
-        raise_softirq(SCHEDULE_SOFTIRQ);
-        return false;
-    }
-
     while ( (sv = get_pending_vcpu(v, &s)) != NULL )
         if ( !wait_for_io(sv, get_ioreq(s, v)) )
             return false;
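For context on what the deferred work itself looks like: vpci_process_pending() is expected to make bounded progress on a queued BAR map/unmap and report whether anything is left, so the callers added above can preempt between chunks. A simplified, illustrative sketch of such chunked processing (hypothetical types and names, not the code in xen/drivers/vpci/header.c) might be:

/*
 * Illustrative sketch only -- hypothetical types and names, not the actual
 * xen/drivers/vpci/header.c code.  Shows deferred BAR (un)mapping processed
 * in bounded chunks so the callers above can preempt between calls.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK_PAGES 64U                /* arbitrary per-call bound */

struct deferred_map {
    uint64_t next_gfn;                 /* next guest frame to process */
    uint64_t end_gfn;                  /* one past the last frame */
    bool     unmap;                    /* true: tear down, false: establish */
};

/* Stand-in for the real per-page (un)map primitive. */
static void map_one_page(uint64_t gfn, bool unmap)
{
    (void)gfn;
    (void)unmap;
}

/* Returns true while work remains, so the caller can reschedule and retry. */
static bool process_pending_map(struct deferred_map *m)
{
    unsigned int done = 0;

    while ( m->next_gfn < m->end_gfn && done < CHUNK_PAGES )
    {
        map_one_page(m->next_gfn, m->unmap);
        m->next_gfn++;
        done++;
    }

    return m->next_gfn < m->end_gfn;
}

int main(void)
{
    struct deferred_map bar = { .next_gfn = 0, .end_gfn = 1000, .unmap = false };
    unsigned int calls = 0;

    while ( process_pending_map(&bar) )
        calls++;                       /* a real caller would reschedule here */

    printf("finished after %u preemptible calls\n", calls + 1);
    return 0;
}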