@@ -11,6 +11,7 @@
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/types.h>
+#include <linux/xarray.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
@@ -393,10 +393,11 @@ static void submit_cleanup(struct kref *kref)
wake_up_all(&submit->gpu->fence_event);
if (submit->out_fence) {
- /* first remove from IDR, so fence can not be found anymore */
- mutex_lock(&submit->gpu->idr_lock);
- idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
- mutex_unlock(&submit->gpu->idr_lock);
+ /*
+ * Remove from user fence array before dropping the reference,
+ * so fence can not be found in lookup anymore.
+ */
+ xa_erase(&submit->gpu->user_fences, submit->out_fence_id);
dma_fence_put(submit->out_fence);
}
kfree(submit->pmrs);
@@ -1216,7 +1216,7 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
* pretends we didn't find a fence in that case.
*/
rcu_read_lock();
- fence = idr_find(&gpu->fence_idr, id);
+ fence = xa_load(&gpu->user_fences, id);
if (fence)
fence = dma_fence_get_rcu(fence);
rcu_read_unlock();
@@ -1700,7 +1700,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
gpu->drm = drm;
gpu->fence_context = dma_fence_context_alloc(1);
- idr_init(&gpu->fence_idr);
+ xa_init_flags(&gpu->user_fences, XA_FLAGS_ALLOC);
spin_lock_init(&gpu->fence_spinlock);
INIT_WORK(&gpu->sync_point_work, sync_point_worker);
@@ -1754,7 +1754,7 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
}
gpu->drm = NULL;
- idr_destroy(&gpu->fence_idr);
+ xa_destroy(&gpu->user_fences);
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
thermal_cooling_device_unregister(gpu->cooling);
@@ -1787,7 +1787,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
mutex_init(&gpu->sched_lock);
- mutex_init(&gpu->idr_lock);
/* Map registers: */
gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
@@ -117,8 +117,8 @@ struct etnaviv_gpu {
u32 idle_mask;
/* Fencing support */
- struct mutex idr_lock;
- struct idr fence_idr;
+ struct xarray user_fences;
+ u32 next_user_fence;
u32 next_fence;
u32 completed_fence;
wait_queue_head_t fence_event;
@@ -98,7 +98,7 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
struct etnaviv_gpu *gpu = submit->gpu;
- int ret = 0;
+ int ret;
/*
* Hold the sched lock across the whole operation to avoid jobs being
@@ -110,14 +110,11 @@ int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
drm_sched_job_arm(&submit->sched_job);
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
- mutex_lock(&gpu->idr_lock);
- submit->out_fence_id = idr_alloc_cyclic(&gpu->fence_idr,
- submit->out_fence, 0,
- INT_MAX, GFP_KERNEL);
- mutex_unlock(&gpu->idr_lock);
- if (submit->out_fence_id < 0) {
+ ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
+ submit->out_fence, xa_limit_32b,
+ &gpu->next_user_fence, GFP_KERNEL);
+ if (ret < 0) {
drm_sched_job_cleanup(&submit->sched_job);
- ret = -ENOMEM;
goto out_unlock;
}
This simplifies the driver code a bit, as XArray already provides
internal locking. IDRs are implemented using XArrays anyway, so this
drops one level of unneeded abstraction.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_drv.h        |  1 +
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c |  9 +++++----
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c        |  7 +++----
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h        |  4 ++--
 drivers/gpu/drm/etnaviv/etnaviv_sched.c      | 13 +++++--------
 5 files changed, 16 insertions(+), 18 deletions(-)
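
For reference, below is a minimal sketch (not part of this patch; the
foo_dev structure and foo_* helpers are hypothetical) of the cyclic
allocation pattern the patch switches to. The XArray takes its internal
lock in xa_alloc_cyclic()/xa_erase(), so no caller-side mutex is needed,
which is why gpu->idr_lock can be dropped above:

```c
#include <linux/xarray.h>

/* Hypothetical example structure, analogous to the user_fences /
 * next_user_fence pair added to struct etnaviv_gpu.
 */
struct foo_dev {
	struct xarray ids;	/* initialized with xa_init_flags(&ids, XA_FLAGS_ALLOC) */
	u32 next_id;		/* hint for cyclic ID allocation */
};

static int foo_track(struct foo_dev *dev, void *obj, u32 *id)
{
	/* Assigns the next free ID in [0, U32_MAX], wrapping around when
	 * the hint overflows; returns 0 (or 1 after a wrap) on success and
	 * a negative errno on failure. Locking is internal to the XArray.
	 */
	return xa_alloc_cyclic(&dev->ids, id, obj, xa_limit_32b,
			       &dev->next_id, GFP_KERNEL);
}

static void *foo_lookup(struct foo_dev *dev, u32 id)
{
	/* Lookups are RCU-safe, mirroring the xa_load() under
	 * rcu_read_lock() in etnaviv_gpu_wait_fence_interruptible().
	 */
	return xa_load(&dev->ids, id);
}

static void foo_untrack(struct foo_dev *dev, u32 id)
{
	/* Removes the entry; no external serialization required. */
	xa_erase(&dev->ids, id);
}
```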