Message ID | 20210115155226.3943181-1-daniel.vetter@ffwll.ch (mailing list archive)
---|---
State | New, archived
Series | dma-buf: Add debug option
Quoting Daniel Vetter (2021-01-15 15:52:26)
> +static void mangle_sg_table(struct sg_table *sg_table)
> +{
> +#ifdef CONFIG_DMABUF_DEBUG
> +	int i;
> +	struct scatterlist *sg;
> +
> +	if (!sg_table)

if (!IS_ENABLED(CONFIG_DMABUF_DEBUG) || IS_ERR_OR_NULL(sg_table))

> +		return;

Although NULL is not meant to be returned.
-Chris
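Chris's suggestion folds the compile-time switch and the NULL/ERR_PTR guard into a single condition, which would also let the #ifdef around the body go away. A minimal sketch of what mangle_sg_table() could look like with that check folded in (editor's illustration of the reviewer's proposal, not the code that was posted or merged):

```c
#include <linux/err.h>		/* IS_ERR_OR_NULL() */
#include <linux/scatterlist.h>	/* struct sg_table, for_each_sgtable_sg() */

static void mangle_sg_table(struct sg_table *sg_table)
{
	struct scatterlist *sg;
	int i;

	/* Compile-time gate plus ERR_PTR/NULL guard in one check, as
	 * suggested by Chris; the compiler drops the body entirely when
	 * CONFIG_DMABUF_DEBUG is not set, so no #ifdef is needed. */
	if (!IS_ENABLED(CONFIG_DMABUF_DEBUG) || IS_ERR_OR_NULL(sg_table))
		return;

	/* Flip the high bits of each page_link so importers oops if they
	 * dereference the struct page; XOR makes the operation its own
	 * inverse, and the low byte (the SG_ flag bits) is preserved. */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
}
```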
On 15.01.21 at 16:52, Daniel Vetter wrote:
> We have too many people abusing the struct page they can get at but
> really shouldn't in importers. Aside from the fact that the backing
> page might simply not exist (for dynamic p2p mappings), looking at it
> and using it e.g. for mmap can also wreck the page handling of the
> exporter completely. Importers really must go through the proper
> interface like dma_buf_mmap for everything.
>
> I'm semi-tempted to enforce this for dynamic importers since those
> really have no excuse at all to break the rules.
>
> Unfortunately we can't store the right pointers somewhere safe to make
> sure we oops on something recognizable, so best is to just wrangle
> them a bit by flipping all the bits. At least on x86 kernel addresses
> have all their high bits set and the struct page array is fairly low
> in the kernel mapping, so flipping all the bits gives us a very high
> pointer in userspace and hence excellent chances for an invalid
> dereference.
>
> v2: Add a note to the @map_dma_buf hook that exporters shouldn't do
> fancy caching tricks, which would blow up with this address scrambling
> trick here (Chris)
>
> Enable by default when CONFIG_DMA_API_DEBUG is enabled.
>
> v3: Only one copy of the mangle/unmangle code (Christian)
>
> v4: #ifdef, not #if (0day)
>
> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v2)
> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Sumit Semwal <sumit.semwal@linaro.org>
> Cc: "Christian König" <christian.koenig@amd.com>
> Cc: David Stevens <stevensd@chromium.org>
> Cc: linux-media@vger.kernel.org
> Cc: linaro-mm-sig@lists.linaro.org
> ---
>  drivers/dma-buf/Kconfig   |  8 +++++++
>  drivers/dma-buf/dma-buf.c | 48 +++++++++++++++++++++++++++++++++++----
>  include/linux/dma-buf.h   |  6 +++++
>  3 files changed, 58 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
> index 4f8224a6ac95..4e16c71c24b7 100644
> --- a/drivers/dma-buf/Kconfig
> +++ b/drivers/dma-buf/Kconfig
> @@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
>  	  This is marked experimental because we don't yet have a consistent
>  	  execution context and memory management between drivers.
>
> +config DMABUF_DEBUG
> +	bool "DMA-BUF debug checks"
> +	default y if DMA_API_DEBUG
> +	help
> +	  This option enables additional checks for DMA-BUF importers and
> +	  exporters. Specifically it validates that importers do not peek at the
> +	  underlying struct page when they import a buffer.
> +
>  config DMABUF_SELFTESTS
>  	tristate "Selftests for the dma-buf interfaces"
>  	default n
> diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
> index 1c9bd51db110..f676bab64f55 100644
> --- a/drivers/dma-buf/dma-buf.c
> +++ b/drivers/dma-buf/dma-buf.c
> @@ -666,6 +666,36 @@ void dma_buf_put(struct dma_buf *dmabuf)
>  }
>  EXPORT_SYMBOL_GPL(dma_buf_put);
>
> +static void mangle_sg_table(struct sg_table *sg_table)
> +{
> +#ifdef CONFIG_DMABUF_DEBUG
> +	int i;
> +	struct scatterlist *sg;
> +
> +	if (!sg_table)

At least in the map case the sg_table returned could also be an ERR_PTR().

Might be even better to drop the __map_dma_buf wrappers and call the
mangle after the error handling.

Christian.

> +		return;
> +
> +	/* To catch abuse of the underlying struct page by importers mix
> +	 * up the bits, but take care to preserve the low SG_ bits to
> +	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
> +	 * before passing the sgt back to the exporter. */
> +	for_each_sgtable_sg(sg_table, sg, i)
> +		sg->page_link ^= ~0xffUL;
> +#endif
> +
> +}
> +static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
> +				       enum dma_data_direction direction)
> +{
> +	struct sg_table *sg_table;
> +
> +	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> +
> +	mangle_sg_table(sg_table);
> +
> +	return sg_table;
> +}
> +
>  /**
>   * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
>   * @dmabuf:	[in]	buffer to attach device to.
> @@ -737,7 +767,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
>  			goto err_unlock;
>  	}
>
> -	sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
> +	sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
>  	if (!sgt)
>  		sgt = ERR_PTR(-ENOMEM);
>  	if (IS_ERR(sgt)) {
> @@ -784,6 +814,16 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
>  }
>  EXPORT_SYMBOL_GPL(dma_buf_attach);
>
> +static void __unmap_dma_buf(struct dma_buf_attachment *attach,
> +			    struct sg_table *sg_table,
> +			    enum dma_data_direction direction)
> +{
> +	/* uses XOR, hence this unmangles */
> +	mangle_sg_table(sg_table);
> +
> +	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> +}
> +
>  /**
>   * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
>   * @dmabuf:	[in]	buffer to detach from.
> @@ -802,7 +842,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
>  	if (dma_buf_is_dynamic(attach->dmabuf))
>  		dma_resv_lock(attach->dmabuf->resv, NULL);
>
> -	dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
> +	__unmap_dma_buf(attach, attach->sgt, attach->dir);
>
>  	if (dma_buf_is_dynamic(attach->dmabuf)) {
>  		dma_buf_unpin(attach);
> @@ -924,7 +964,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
>  		}
>  	}
>
> -	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
> +	sg_table = __map_dma_buf(attach, direction);
>  	if (!sg_table)
>  		sg_table = ERR_PTR(-ENOMEM);
>
> @@ -987,7 +1027,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
>  	if (dma_buf_is_dynamic(attach->dmabuf))
>  		dma_resv_assert_held(attach->dmabuf->resv);
>
> -	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
> +	__unmap_dma_buf(attach, sg_table, direction);
>
>  	if (dma_buf_is_dynamic(attach->dmabuf) &&
>  	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> index 628681bf6c99..efdc56b9d95f 100644
> --- a/include/linux/dma-buf.h
> +++ b/include/linux/dma-buf.h
> @@ -154,6 +154,12 @@ struct dma_buf_ops {
>  	 * On failure, returns a negative error value wrapped into a pointer.
>  	 * May also return -EINTR when a signal was received while being
>  	 * blocked.
> +	 *
> +	 * Note that exporters should not try to cache the scatter list, or
> +	 * return the same one for multiple calls. Caching is done either by the
> +	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
> +	 * of the scatter list is transferred to the caller, and returned by
> +	 * @unmap_dma_buf.
>  	 */
>  	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
>  					 enum dma_data_direction);
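One way to read Christian's second suggestion is that the mangling would move out of the ->map_dma_buf() wrapper and into the callers, after the NULL/ERR_PTR handling has produced a known-valid sg_table. A rough, editor-added sketch of how a dma_buf_map_attachment()-style call site might then look (illustrative only; the surrounding error handling is simplified, and the posted patch instead keeps the __map_dma_buf()/__unmap_dma_buf() wrappers):

```c
	/* Simplified excerpt: map first, finish the error handling,
	 * and only then apply the debug mangling. */
	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);
	if (IS_ERR(sg_table))
		return sg_table;	/* real code also unwinds pin/lock state here */

	/* sg_table is now known to be a valid pointer, so the ERR_PTR case
	 * Christian points out can never reach the mangling code. */
	mangle_sg_table(sg_table);
```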
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 4f8224a6ac95..4e16c71c24b7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -50,6 +50,14 @@ config DMABUF_MOVE_NOTIFY
 	  This is marked experimental because we don't yet have a consistent
 	  execution context and memory management between drivers.
 
+config DMABUF_DEBUG
+	bool "DMA-BUF debug checks"
+	default y if DMA_API_DEBUG
+	help
+	  This option enables additional checks for DMA-BUF importers and
+	  exporters. Specifically it validates that importers do not peek at the
+	  underlying struct page when they import a buffer.
+
 config DMABUF_SELFTESTS
 	tristate "Selftests for the dma-buf interfaces"
 	default n
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 1c9bd51db110..f676bab64f55 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -666,6 +666,36 @@ void dma_buf_put(struct dma_buf *dmabuf)
 }
 EXPORT_SYMBOL_GPL(dma_buf_put);
 
+static void mangle_sg_table(struct sg_table *sg_table)
+{
+#ifdef CONFIG_DMABUF_DEBUG
+	int i;
+	struct scatterlist *sg;
+
+	if (!sg_table)
+		return;
+
+	/* To catch abuse of the underlying struct page by importers mix
+	 * up the bits, but take care to preserve the low SG_ bits to
+	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+	 * before passing the sgt back to the exporter. */
+	for_each_sgtable_sg(sg_table, sg, i)
+		sg->page_link ^= ~0xffUL;
+#endif
+
+}
+static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
+				       enum dma_data_direction direction)
+{
+	struct sg_table *sg_table;
+
+	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+
+	mangle_sg_table(sg_table);
+
+	return sg_table;
+}
+
 /**
  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
  * @dmabuf:	[in]	buffer to attach device to.
@@ -737,7 +767,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 			goto err_unlock;
 	}
 
-	sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
+	sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
 	if (!sgt)
 		sgt = ERR_PTR(-ENOMEM);
 	if (IS_ERR(sgt)) {
@@ -784,6 +814,16 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 }
 EXPORT_SYMBOL_GPL(dma_buf_attach);
 
+static void __unmap_dma_buf(struct dma_buf_attachment *attach,
+			    struct sg_table *sg_table,
+			    enum dma_data_direction direction)
+{
+	/* uses XOR, hence this unmangles */
+	mangle_sg_table(sg_table);
+
+	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+}
+
 /**
  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
  * @dmabuf:	[in]	buffer to detach from.
@@ -802,7 +842,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 	if (dma_buf_is_dynamic(attach->dmabuf))
 		dma_resv_lock(attach->dmabuf->resv, NULL);
 
-	dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
+	__unmap_dma_buf(attach, attach->sgt, attach->dir);
 
 	if (dma_buf_is_dynamic(attach->dmabuf)) {
 		dma_buf_unpin(attach);
@@ -924,7 +964,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 		}
 	}
 
-	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+	sg_table = __map_dma_buf(attach, direction);
 	if (!sg_table)
 		sg_table = ERR_PTR(-ENOMEM);
 
@@ -987,7 +1027,7 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 	if (dma_buf_is_dynamic(attach->dmabuf))
 		dma_resv_assert_held(attach->dmabuf->resv);
 
-	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+	__unmap_dma_buf(attach, sg_table, direction);
 
 	if (dma_buf_is_dynamic(attach->dmabuf) &&
 	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 628681bf6c99..efdc56b9d95f 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -154,6 +154,12 @@ struct dma_buf_ops {
 	 * On failure, returns a negative error value wrapped into a pointer.
 	 * May also return -EINTR when a signal was received while being
 	 * blocked.
+	 *
+	 * Note that exporters should not try to cache the scatter list, or
+	 * return the same one for multiple calls. Caching is done either by the
+	 * DMA-BUF code (for non-dynamic importers) or the importer. Ownership
+	 * of the scatter list is transferred to the caller, and returned by
+	 * @unmap_dma_buf.
 	 */
 	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
 					 enum dma_data_direction);
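The whole scheme relies on two properties of the `page_link ^= ~0xffUL` operation: applying it twice restores the original value (so __unmap_dma_buf() can hand the exporter back an untouched sgt), and the low byte, where the scatterlist code keeps its SG_CHAIN/SG_END markers, is never disturbed. A tiny userspace demonstration of those two properties (plain C; the page_link value is made up, nothing here is kernel code):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend page_link value: a pointer-like number whose low bits
	 * carry flags, as the scatterlist code does (value is made up). */
	uint64_t page_link = 0xffffea0004d2c042ULL;
	uint64_t mangled = page_link ^ ~0xffULL;

	/* The high bits are flipped, so dereferencing the "pointer" part
	 * would land somewhere obviously bogus... */
	printf("original: %#018llx\n", (unsigned long long)page_link);
	printf("mangled:  %#018llx\n", (unsigned long long)mangled);

	/* ...but the low byte (the SG_ flag bits) survives unchanged, */
	assert((mangled & 0xff) == (page_link & 0xff));

	/* and XOR-ing a second time restores the original value exactly. */
	assert((mangled ^ ~0xffULL) == page_link);

	return 0;
}
```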