@@ -157,11 +157,11 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
base->refcount_release = refcount_release;
base->ref_obj_release = ref_obj_release;
base->object_type = object_type;
- spin_lock(&tdev->object_lock);
kref_init(&base->refcount);
- ret = drm_ht_just_insert_please(&tdev->object_hash,
- &base->hash,
- (unsigned long)base, 31, 0, 0);
+ spin_lock(&tdev->object_lock);
+ ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
+ &base->hash,
+ (unsigned long)base, 31, 0, 0);
spin_unlock(&tdev->object_lock);
if (unlikely(ret != 0))
goto out_err0;
@@ -175,7 +175,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
return 0;
out_err1:
spin_lock(&tdev->object_lock);
- (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
spin_unlock(&tdev->object_lock);
out_err0:
return ret;
@@ -189,8 +189,15 @@ static void ttm_release_base(struct kref *kref)
struct ttm_object_device *tdev = base->tfile->tdev;

spin_lock(&tdev->object_lock);
- (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
+ (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
spin_unlock(&tdev->object_lock);
+
+ /*
+ * Note: We don't use synchronize_rcu() here because it's far
+ * too slow. It's up to the user to free the object using
+ * call_rcu() or ttm_base_object_kfree().
+ */
+
if (base->refcount_release) {
ttm_object_file_unref(&base->tfile);
base->refcount_release(&base);
@@ -216,7 +223,7 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
int ret;

rcu_read_lock();
- ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
+ ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);

if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_base_object, hash);
Also move a kref_init() out of the spinlocked region.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
---
 drivers/gpu/drm/ttm/ttm_object.c | 21 ++++++++++++++-------
 1 files changed, 14 insertions(+), 7 deletions(-)
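The comment added in ttm_release_base() shifts responsibility for the RCU grace period onto the code that supplies the refcount_release callback: since ttm_release_base() no longer waits itself, the memory backing the base object must not be reused while a concurrent rcu_read_lock() section in ttm_base_object_lookup() could still dereference it. Below is a minimal sketch of what such a callback could look like. The struct my_driver_object layout, the embedded rcu field, and the function names are illustrative assumptions only, not part of this patch; it assumes the refcount_release signature taken by ttm_base_object_init() and the usual kernel RCU helpers.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <drm/ttm/ttm_object.h>

/* Hypothetical driver object embedding a TTM base object. */
struct my_driver_object {
	struct ttm_base_object base;
	struct rcu_head rcu;
	/* driver-private payload ... */
};

/* RCU callback: runs after a grace period, so no RCU lookup can still see us. */
static void my_driver_object_free_rcu(struct rcu_head *head)
{
	struct my_driver_object *obj =
		container_of(head, struct my_driver_object, rcu);

	kfree(obj);
}

/* refcount_release callback passed to ttm_base_object_init(). */
static void my_driver_object_release(struct ttm_base_object **p_base)
{
	struct my_driver_object *obj =
		container_of(*p_base, struct my_driver_object, base);

	*p_base = NULL;
	/* Defer the actual free instead of calling kfree() directly. */
	call_rcu(&obj->rcu, my_driver_object_free_rcu);
}

Deferring with call_rcu() as above (or with kfree_rcu() on an embedded rcu_head, which is what a helper like the ttm_base_object_kfree() mentioned in the comment would amount to) is what makes skipping synchronize_rcu() in ttm_release_base() safe.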