Message ID | 20221025162004.8501-2-olekstysh@gmail.com |
---|---|
State | Accepted |
Commit | 99b9f0ed57c5b510e00bc7d47314f7515030fe25 |
Series | xen/virtio: Handle PCI devices which Host controller is described in DT |
On 10/25/22 19:20, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>
> This is needed to avoid having to parse the same device-tree
> several times for a given device.
>
> For this to work we need to install the xen_virtio_restricted_mem_acc
> callback in Arm's xen_guest_init(), which is the same callback as x86's
> PV and HVM modes already use, and remove the manual assignment in
> xen_setup_dma_ops(). Also we need to split the code to initialize
> backend_domid into a separate function.
>
> Prior to this patch we parsed the device-tree three times:
> 1. xen_setup_dma_ops()->...->xen_is_dt_grant_dma_device()
> 2. xen_setup_dma_ops()->...->xen_dt_grant_init_backend_domid()
> 3. xen_virtio_mem_acc()->...->xen_is_dt_grant_dma_device()
>
> With this patch we parse the device-tree only once, in
> xen_virtio_restricted_mem_acc()->...->xen_dt_grant_init_backend_domid()
>
> Other benefits are:
> - Not diverging from x86 when setting up Xen grant DMA ops
> - Dropping several global functions
>
> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

Reviewed-by: Xenia Ragiadakou <burzalodowa@gmail.com>

I have a question unrelated to the patch.
Can't CONFIG_XEN_VIRTIO_FORCE_GRANT be used to force dom0 as the backend
in case xen_dt_grant_init_backend_domid() fails?

> ---
> New patch
> ---
>  arch/arm/xen/enlighten.c    |  2 +-
>  drivers/xen/grant-dma-ops.c | 77 ++++++++++++++-----------------------
>  include/xen/arm/xen-ops.h   |  4 +-
>  include/xen/xen-ops.h       | 16 --------
>  4 files changed, 30 insertions(+), 69 deletions(-)
>
> [...]
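For readers unfamiliar with the binding the code above parses: xen_dt_grant_init_backend_domid() looks up the device's "iommus" phandle, checks that it points at a "xen,grant-dma" node with #iommu-cells = <1>, and takes the single specifier cell as the backend domain ID. Below is a minimal sketch of such a device-tree fragment; the node names, addresses and the backend domid value of 1 are illustrative assumptions, not taken from the patch.

/* Illustrative device-tree fragment (names, addresses and domid are assumptions) */
xen_grant_dma: xen-grant-dma {
        compatible = "xen,grant-dma";   /* matched by of_device_is_compatible() */
        #iommu-cells = <1>;             /* exactly one cell: the backend domain ID */
};

virtio@2000000 {
        compatible = "virtio,mmio";
        reg = <0x2000000 0x200>;
        iommus = <&xen_grant_dma 1>;    /* iommu_spec.args[0] == 1 -> backend_domid = 1 */
};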
On 25.10.22 20:27, Xenia Ragiadakou wrote:

Hello Xenia

> On 10/25/22 19:20, Oleksandr Tyshchenko wrote:
>> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>>
>> [...]
>>
>> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>
> Reviewed-by: Xenia Ragiadakou <burzalodowa@gmail.com>

Thanks!

> I have a question unrelated to the patch.
> Can't CONFIG_XEN_VIRTIO_FORCE_GRANT be used to force dom0 as the backend
> in case xen_dt_grant_init_backend_domid() fails?

Good question, as always :)

The current patch doesn't change the behavior of CONFIG_XEN_VIRTIO_FORCE_GRANT
on Arm with device-tree: the option is not applied to device-tree based
devices, because for them we already have a way to communicate backend_domid,
so there is no need to guess.

Below is my understanding, which might be wrong.

A xen_dt_grant_init_backend_domid() failure means that we didn't retrieve the
backend_domid from the device node (either the binding is wrong or it is
absent altogether; the latter means the device is *not* required to use grants
for virtio). I don't really know whether forcing grant usage with domid = 0
would be a good idea in that case; it just might not work, for instance if the
backend runs in a domain other than Dom0, or runs in Dom0 but doesn't support
grant mappings. On the other hand, CONFIG_XEN_VIRTIO_FORCE_GRANT is disabled
by default, and if it gets enabled the user is likely aware of the
consequences.

If we want to always honor CONFIG_XEN_VIRTIO_FORCE_GRANT, we would likely need
to put the "if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT))" check first
(before the check for a DT device).

>> [...]
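To make that last suggestion concrete, here is a minimal sketch of xen_grant_init_backend_domid() with the CONFIG_XEN_VIRTIO_FORCE_GRANT check moved ahead of the device-tree lookup. This is only an illustration of the idea discussed above, not part of the applied patch.

/* Hypothetical reordering; the merged patch checks dev->of_node first */
static int xen_grant_init_backend_domid(struct device *dev,
                                        domid_t *backend_domid)
{
        int ret = -ENODEV;

        if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
                /* Force grants with Dom0 as backend, even for DT-described devices */
                dev_info(dev, "Using dom0 as backend\n");
                *backend_domid = 0;
                ret = 0;
        } else if (dev->of_node) {
                /* Normal path: read the backend domid from the "iommus" property */
                ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
        } else if (xen_pv_domain()) {
                dev_info(dev, "Using dom0 as backend\n");
                *backend_domid = 0;
                ret = 0;
        }

        return ret;
}

As noted above, whether forcing domid 0 is actually desirable when the device tree names a different backend is a separate question; the sketch only shows where the check would move.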
On Tue, 25 Oct 2022, Oleksandr Tyshchenko wrote:
> From: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
>
> This is needed to avoid having to parse the same device-tree
> several times for a given device.
>
> [...]
>
> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

> [...]
> --
> 2.25.1
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 93c8ccbf2982..7d59765aef22 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -445,7 +445,7 @@ static int __init xen_guest_init(void)
 		return 0;
 
 	if (IS_ENABLED(CONFIG_XEN_VIRTIO))
-		virtio_set_mem_acc_cb(xen_virtio_mem_acc);
+		virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
 
 	if (!acpi_disabled)
 		xen_acpi_guest_init();
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index daa525df7bdc..1e797a043980 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -292,50 +292,20 @@ static const struct dma_map_ops xen_grant_dma_ops = {
 	.dma_supported = xen_grant_dma_supported,
 };
 
-static bool xen_is_dt_grant_dma_device(struct device *dev)
-{
-	struct device_node *iommu_np;
-	bool has_iommu;
-
-	iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
-	has_iommu = iommu_np &&
-		    of_device_is_compatible(iommu_np, "xen,grant-dma");
-	of_node_put(iommu_np);
-
-	return has_iommu;
-}
-
-bool xen_is_grant_dma_device(struct device *dev)
-{
-	/* XXX Handle only DT devices for now */
-	if (dev->of_node)
-		return xen_is_dt_grant_dma_device(dev);
-
-	return false;
-}
-
-bool xen_virtio_mem_acc(struct virtio_device *dev)
-{
-	if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
-		return true;
-
-	return xen_is_grant_dma_device(dev->dev.parent);
-}
-
 static int xen_dt_grant_init_backend_domid(struct device *dev,
-					   struct xen_grant_dma_data *data)
+					   domid_t *backend_domid)
 {
 	struct of_phandle_args iommu_spec;
 
 	if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
			0, &iommu_spec)) {
-		dev_err(dev, "Cannot parse iommus property\n");
+		dev_dbg(dev, "Cannot parse iommus property\n");
 		return -ESRCH;
 	}
 
 	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
			iommu_spec.args_count != 1) {
-		dev_err(dev, "Incompatible IOMMU node\n");
+		dev_dbg(dev, "Incompatible IOMMU node\n");
 		of_node_put(iommu_spec.np);
 		return -ESRCH;
 	}
@@ -346,12 +316,28 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
 	 * The endpoint ID here means the ID of the domain where the
 	 * corresponding backend is running
 	 */
-	data->backend_domid = iommu_spec.args[0];
+	*backend_domid = iommu_spec.args[0];
 
 	return 0;
 }
 
-void xen_grant_setup_dma_ops(struct device *dev)
+static int xen_grant_init_backend_domid(struct device *dev,
+					domid_t *backend_domid)
+{
+	int ret = -ENODEV;
+
+	if (dev->of_node) {
+		ret = xen_dt_grant_init_backend_domid(dev, backend_domid);
+	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
+		dev_info(dev, "Using dom0 as backend\n");
+		*backend_domid = 0;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
 {
 	struct xen_grant_dma_data *data;
 
@@ -365,16 +351,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
 	if (!data)
 		goto err;
 
-	if (dev->of_node) {
-		if (xen_dt_grant_init_backend_domid(dev, data))
-			goto err;
-	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
-		dev_info(dev, "Using dom0 as backend\n");
-		data->backend_domid = 0;
-	} else {
-		/* XXX ACPI device unsupported for now */
-		goto err;
-	}
+	data->backend_domid = backend_domid;
 
 	if (store_xen_grant_dma_data(dev, data)) {
 		dev_err(dev, "Cannot store Xen grant DMA data\n");
@@ -392,12 +369,14 @@ void xen_grant_setup_dma_ops(struct device *dev)
 
 bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
 {
-	bool ret = xen_virtio_mem_acc(dev);
+	domid_t backend_domid;
 
-	if (ret)
-		xen_grant_setup_dma_ops(dev->dev.parent);
+	if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
+		xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
+		return true;
+	}
 
-	return ret;
+	return false;
 }
 
 MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h
index b0766a660338..70073f5a2b54 100644
--- a/include/xen/arm/xen-ops.h
+++ b/include/xen/arm/xen-ops.h
@@ -8,9 +8,7 @@
 static inline void xen_setup_dma_ops(struct device *dev)
 {
 #ifdef CONFIG_XEN
-	if (xen_is_grant_dma_device(dev))
-		xen_grant_setup_dma_ops(dev);
-	else if (xen_swiotlb_detect())
+	if (xen_swiotlb_detect())
 		dev->dma_ops = &xen_swiotlb_dma_ops;
 #endif
 }
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index a34f4271a2e9..47f11bec5e90 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -216,26 +216,10 @@ static inline void xen_preemptible_hcall_end(void) { }
 #endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
 
 #ifdef CONFIG_XEN_GRANT_DMA_OPS
-void xen_grant_setup_dma_ops(struct device *dev);
-bool xen_is_grant_dma_device(struct device *dev);
-bool xen_virtio_mem_acc(struct virtio_device *dev);
 bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
 #else
-static inline void xen_grant_setup_dma_ops(struct device *dev)
-{
-}
-static inline bool xen_is_grant_dma_device(struct device *dev)
-{
-	return false;
-}
-
 struct virtio_device;
 
-static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
-{
-	return false;
-}
-
 static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
 {
 	return false;