@@ -144,6 +144,7 @@ int drm_gem_object_init(struct drm_device *dev,
return PTR_ERR(obj->filp);
kref_init(&obj->refcount);
+ mutex_init(&obj->prime_lock);
atomic_set(&obj->handle_count, 0);
obj->size = size;
@@ -165,6 +166,7 @@ int drm_gem_private_object_init(struct drm_device *dev,
obj->filp = NULL;
kref_init(&obj->refcount);
+ mutex_init(&obj->prime_lock);
atomic_set(&obj->handle_count, 0);
obj->size = size;
@@ -208,9 +210,9 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
drm_prime_remove_imported_buf_handle(&filp->prime,
obj->import_attach->dmabuf);
}
- if (obj->export_dma_buf) {
+ if (obj->dma_buf) {
drm_prime_remove_imported_buf_handle(&filp->prime,
- obj->export_dma_buf);
+ obj->dma_buf);
}
}
@@ -604,6 +606,9 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
* Removes any name for the object. Note that this must be
* called before drm_gem_object_free or we'll be touching
* freed memory
+ *
+ * Must _not_ be called while the dev->struct_mutex lock is held, or any
+ * driver-specific lock that is required in the dma buf's ->release callback.
*/
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
@@ -625,6 +630,18 @@ void drm_gem_object_handle_free(struct drm_gem_object *obj)
} else
spin_unlock(&dev->object_name_lock);
+ mutex_lock(&obj->prime_lock);
+ if (obj->dma_buf) {
+ struct dma_buf *buf = obj->dma_buf;
+ obj->dma_buf = NULL;
+ mutex_unlock(&obj->prime_lock);
+
+ /* We still hold a reference to the underlying GEM object, so
+ * this will free at most the dma buf object itself. */
+ dma_buf_put(buf);
+ } else
+ mutex_unlock(&obj->prime_lock);
+
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
@@ -84,36 +84,47 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
return 0;
}
- if (obj->export_dma_buf) {
- get_dma_buf(obj->export_dma_buf);
- *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
- drm_gem_object_unreference_unlocked(obj);
+ mutex_lock(&obj->prime_lock);
+ if (obj->dma_buf) {
+ get_dma_buf(obj->dma_buf);
} else {
buf = dev->driver->gem_prime_export(dev, obj, flags);
if (IS_ERR(buf)) {
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
*/
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return PTR_ERR(buf);
+ ret = PTR_ERR(buf);
+ goto out;
}
- obj->export_dma_buf = buf;
- *prime_fd = dma_buf_fd(buf, flags);
+ /* Allocate a reference for the dma buf. */
+ drm_gem_object_reference(obj);
+
+ /* Allocate a reference for the re-export cache. */
+ get_dma_buf(buf);
+ obj->dma_buf = buf;
}
/* if we've exported this buffer the cheat and add it to the import list
* so we get the correct handle back
*/
ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
- obj->export_dma_buf, handle);
- if (ret) {
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return ret;
- }
+ obj->dma_buf, handle);
+ if (ret)
+ goto out;
+ /* Only set up the userspace fd once everything is done. */
+ *prime_fd = dma_buf_fd(obj->dma_buf, flags);
+out:
+ mutex_unlock(&obj->prime_lock);
mutex_unlock(&file_priv->prime.lock);
- return 0;
+
+ /* Check whether someone sneaky dropped the last userspace gem handle,
+ * clean up the mess if so. */
+ if (atomic_read(&obj->handle_count) == 0)
+ drm_gem_object_handle_free(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
@@ -133,21 +144,38 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
dma_buf, handle);
if (!ret) {
- ret = 0;
- goto out_put;
+ mutex_unlock(&file_priv->prime.lock);
+ dma_buf_put(dma_buf);
+
+ return 0;
}
/* never seen this one, need to import */
obj = dev->driver->gem_prime_import(dev, dma_buf);
if (IS_ERR(obj)) {
- ret = PTR_ERR(obj);
- goto out_put;
+ mutex_unlock(&file_priv->prime.lock);
+ dma_buf_put(dma_buf);
+
+ return PTR_ERR(obj);
}
+ mutex_lock(&obj->prime_lock);
+ /* Refill the export cache - this is only really important if the dma
+ * buf was the only userspace visible handle left and we're re-importing
+ * into the original exporter, in which case we've cleared obj->dma_buf
+ * already. Because we will create a GEM userspace handle below we only
+ * need to check for gem_close races if that fails.
+ */
+ if (!obj->dma_buf) {
+ obj->dma_buf = dma_buf;
+ get_dma_buf(dma_buf);
+ } else
+ WARN_ON(obj->dma_buf != dma_buf);
+ mutex_unlock(&obj->prime_lock);
+
ret = drm_gem_handle_create(file_priv, obj, handle);
- drm_gem_object_unreference_unlocked(obj);
if (ret)
- goto out_put;
+ goto fail_handle;
ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
dma_buf, *handle);
@@ -155,6 +183,8 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
goto fail;
mutex_unlock(&file_priv->prime.lock);
+ drm_gem_object_unreference_unlocked(obj);
+
return 0;
fail:
@@ -162,9 +192,13 @@ fail:
* to detach.. which seems ok..
*/
drm_gem_object_handle_unreference_unlocked(obj);
-out_put:
- dma_buf_put(dma_buf);
+fail_handle:
+ if (atomic_read(&obj->handle_count) == 0)
+ drm_gem_object_handle_free(obj);
mutex_unlock(&file_priv->prime.lock);
+ drm_gem_object_unreference_unlocked(obj);
+ dma_buf_put(dma_buf);
+
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
@@ -112,21 +112,7 @@ static void exynos_dmabuf_release(struct dma_buf *dmabuf)
DRM_DEBUG_PRIME("%s\n", __FILE__);
- /*
- * exynos_dmabuf_release() call means that file object's
- * f_count is 0 and it calls drm_gem_object_handle_unreference()
- * to drop the references that these values had been increased
- * at drm_prime_handle_to_fd()
- */
- if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
- exynos_gem_obj->base.export_dma_buf = NULL;
-
- /*
- * drop this gem object refcount to release allocated buffer
- * and resources.
- */
- drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
- }
+ drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
}
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
@@ -67,11 +67,7 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf->priv;
- if (obj->base.export_dma_buf == dma_buf) {
- /* drop the reference on the export fd holds */
- obj->base.export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(&obj->base);
- }
+ drm_gem_object_unreference_unlocked(&obj->base);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
@@ -59,10 +59,7 @@ static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
{
struct nouveau_bo *nvbo = dma_buf->priv;
- if (nvbo->gem->export_dma_buf == dma_buf) {
- nvbo->gem->export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(nvbo->gem);
- }
+ drm_gem_object_unreference_unlocked(nvbo->gem);
}
static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
@@ -59,11 +59,7 @@ static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
{
struct radeon_bo *bo = dma_buf->priv;
- if (bo->gem_base.export_dma_buf == dma_buf) {
- DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
- bo->gem_base.export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(&bo->gem_base);
- }
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
}
static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
@@ -668,10 +668,31 @@ struct drm_gem_object {
void *driver_private;
- /* dma buf exported from this GEM object */
- struct dma_buf *export_dma_buf;
+ /**
+ * prime_lock - protects dma buf state of exported GEM objects
+ *
+ * Specifically dma_buf, but this can be used by drivers for
+ * other things.
+ */
+ struct mutex prime_lock;
+
+ /** dma_buf - dma buf associated with this GEM object
+ *
+ * This is a cache of the dma buf associated to this GEM object to
+ * ensure that there is only ever one dma buf for a given GEM object.
+ * Only kept around as long as there are still userspace handles around.
+ * If only the dma buf holds the last userspace-visible reference on
+ * this GEM object, dma_buf will be NULL, but set again on import.
+ * Protected by prime_lock.
+ */
+ struct dma_buf *dma_buf;
- /* dma buf attachment backing this object */
+ /**
+ * import_attach - dma buf attachment backing this object.
+ *
+ * Invariant over the lifetime
+ * of an object, hence needs no locking.
+ */
struct dma_buf_attachment *import_attach;
};
@@ -1651,24 +1672,6 @@ drm_gem_object_handle_reference(struct drm_gem_object *obj)
}
static inline void
-drm_gem_object_handle_unreference(struct drm_gem_object *obj)
-{
- if (obj == NULL)
- return;
-
- if (atomic_read(&obj->handle_count) == 0)
- return;
- /*
- * Must bump handle count first as this may be the last
- * ref, in which case the object would disappear before we
- * checked for a name
- */
- if (atomic_dec_and_test(&obj->handle_count))
- drm_gem_object_handle_free(obj);
- drm_gem_object_unreference(obj);
-}
-
-static inline void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
if (obj == NULL)
Well, actually add some, because currently there's exactly none: - in handle_to_fd we only take the file_priv's prime lock, but the underlying gem object we're exporting isn't per-file. - in each driver's dma_buf->release callback we don't grab any locks at all. Also, we're not protected by any object lifetime rules: We can export a GEM object then fclose that fd _while_ we re-export the same object again. If we're unlucky, we'll see a non-zero export_dma_buf, but the file's refcount is already zero. To fix this, we need two pieces: - We need to add some locking. I've opted for a per-object mutex instead of something more global because I see tons of places where drivers need to have finer-grained locking to avoid deadlocks when sharing buffers between devices in both directions. And since ttm has rather fine-grained locking I've thought we shouldn't force drivers to unnecessarily serialize this state. - We need to hold a reference onto the dma buf object while export_dma_buf is non-NULL. Otherwise we'll always have races where the dma buf's file refcount is zero already, but export_dma_buf is still set. Now obviously this dma buf reference would lead to a refcount loop, so we need to break that at the right time. Do the same trick as for the gem flink name: As soon as all GEM userspace handles are gone (i.e. obj->handle_count == 0) we drop this dma_buf reference. This has a few funny complications: - We could hold the last dma buf reference (but by necessity not the last reference to the underlying GEM object). The only thing that can therefore happen is that the dma buf object gets freed and we call the driver's ->release callback. Which needs to drop its own reference to the underlying GEM object. To ensure that we don't end up with surprises, kill the (already) unused handle_unreference variant and only expose the unlocked one (which is safer). - Userspace might race with us when exporting a dma_buf and close the last userspace GEM handle. 
Check for that and re-call handle_free to clean up the mess. Note that the current flink name code is suffering from the same race but doesn't handle it. A follow-up patch will fix this. - Userspace might drop all GEM handles, but still hold a reference through a dma buf object. To ensure that an export of this object after it has been imported will do the right thing, we also need to set up the dma buf cache again. Otherwise we could create a second dma buf when exporting it again. Also add some paranoia and check whether a re-import gives back the right dma_buf. Because this cache is now not only set on export but also on import and is always set if there's a dma buf associated for this GEM object (and some GEM userspace handles still around), drop the export_ prefix and just call it obj->dma_buf. v2: Be careful to not drop any references while holding locks. This only really affected -ENOMEM error paths, but could lead to a deadlock on one of the prime locks. Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> --- drivers/gpu/drm/drm_gem.c | 21 ++++++- drivers/gpu/drm/drm_prime.c | 82 ++++++++++++++++++++-------- drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 16 +----- drivers/gpu/drm/i915/i915_gem_dmabuf.c | 6 +- drivers/gpu/drm/nouveau/nouveau_prime.c | 5 +- drivers/gpu/drm/radeon/radeon_prime.c | 6 +- include/drm/drmP.h | 45 ++++++++------- 7 files changed, 105 insertions(+), 76 deletions(-)