diff mbox series

[33/34] drm/vmwgfx: Convert base IDR to XArray

Message ID 20190221184226.2149-65-willy@infradead.org (mailing list archive)
State New, archived
Headers show
Series Convert DRM to XArray | expand

Commit Message

Matthew Wilcox (Oracle) Feb. 21, 2019, 6:42 p.m. UTC
Convert the ttm_object_device base-object IDR (and the object_lock
spinlock that protected it) to an XArray. The XArray handles its own
internal locking, so the external spinlock and the
idr_preload()/GFP_NOWAIT dance can be dropped; xa_alloc() with
xa_limit_31b assigns the handle directly into base->handle.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 drivers/gpu/drm/vmwgfx/ttm_object.c | 29 ++++++++---------------------
 1 file changed, 8 insertions(+), 21 deletions(-)
diff mbox series

Patch

diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 36990b80e790..8f394b94060b 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -77,8 +77,6 @@  struct ttm_object_file {
 /**
  * struct ttm_object_device
  *
- * @object_lock: lock that protects the object_hash hash table.
- *
  * @object_hash: hash table for fast lookup of object global names.
  *
  * @object_count: Per device object count.
@@ -87,14 +85,13 @@  struct ttm_object_file {
  */
 
 struct ttm_object_device {
-	spinlock_t object_lock;
 	struct drm_open_hash object_hash;
 	atomic_t object_count;
 	struct ttm_mem_global *mem_glob;
 	struct dma_buf_ops ops;
 	void (*dmabuf_release)(struct dma_buf *dma_buf);
 	size_t dma_buf_size;
-	struct idr idr;
+	struct xarray bases;
 };
 
 /**
@@ -172,15 +169,11 @@  int ttm_base_object_init(struct ttm_object_file *tfile,
 	base->ref_obj_release = ref_obj_release;
 	base->object_type = object_type;
 	kref_init(&base->refcount);
-	idr_preload(GFP_KERNEL);
-	spin_lock(&tdev->object_lock);
-	ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
-	spin_unlock(&tdev->object_lock);
-	idr_preload_end();
+	ret = xa_alloc(&tdev->bases, &base->handle, base, xa_limit_31b,
+			GFP_KERNEL);
 	if (ret < 0)
 		return ret;
 
-	base->handle = ret;
 	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 	if (unlikely(ret != 0))
 		goto out_err1;
@@ -189,9 +182,7 @@  int ttm_base_object_init(struct ttm_object_file *tfile,
 
 	return 0;
 out_err1:
-	spin_lock(&tdev->object_lock);
-	idr_remove(&tdev->idr, base->handle);
-	spin_unlock(&tdev->object_lock);
+	xa_erase(&tdev->bases, base->handle);
 	return ret;
 }
 
@@ -201,9 +192,7 @@  static void ttm_release_base(struct kref *kref)
 	    container_of(kref, struct ttm_base_object, refcount);
 	struct ttm_object_device *tdev = base->tfile->tdev;
 
-	spin_lock(&tdev->object_lock);
-	idr_remove(&tdev->idr, base->handle);
-	spin_unlock(&tdev->object_lock);
+	xa_erase(&tdev->bases, base->handle);
 
 	/*
 	 * Note: We don't use synchronize_rcu() here because it's far
@@ -287,7 +276,7 @@  ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
 	struct ttm_base_object *base;
 
 	rcu_read_lock();
-	base = idr_find(&tdev->idr, key);
+	base = xa_load(&tdev->bases, key);
 
 	if (base && !kref_get_unless_zero(&base->refcount))
 		base = NULL;
@@ -534,13 +523,12 @@  ttm_object_device_init(struct ttm_mem_global *mem_glob,
 		return NULL;
 
 	tdev->mem_glob = mem_glob;
-	spin_lock_init(&tdev->object_lock);
 	atomic_set(&tdev->object_count, 0);
 	ret = drm_ht_create(&tdev->object_hash, hash_order);
 	if (ret != 0)
 		goto out_no_object_hash;
 
-	idr_init(&tdev->idr);
+	xa_init_flags(&tdev->bases, XA_FLAGS_ALLOC);
 	tdev->ops = *ops;
 	tdev->dmabuf_release = tdev->ops.release;
 	tdev->ops.release = ttm_prime_dmabuf_release;
@@ -559,8 +547,7 @@  void ttm_object_device_release(struct ttm_object_device **p_tdev)
 
 	*p_tdev = NULL;
 
-	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
-	idr_destroy(&tdev->idr);
+	WARN_ON_ONCE(!xa_empty(&tdev->bases));
 	drm_ht_remove(&tdev->object_hash);
 
 	kfree(tdev);