
[13/34] drm/amdgpu: Convert ctx_handles to XArray

Message ID 20190221184226.2149-27-willy@infradead.org (mailing list archive)
State New, archived
Series Convert DRM to XArray

Commit Message

Matthew Wilcox (Oracle) Feb. 21, 2019, 6:41 p.m. UTC
Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 42 ++++++++---------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c |  4 +--
 4 files changed, 19 insertions(+), 31 deletions(-)

Comments

Christian König Feb. 25, 2019, 4:07 p.m. UTC | #1
Am 21.02.19 um 19:41 schrieb Matthew Wilcox:
> Signed-off-by: Matthew Wilcox <willy@infradead.org>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu.h       |  2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 42 ++++++++---------------
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h   |  2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c |  4 +--
>   4 files changed, 19 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> index 6a704aaa7dbe..c2650f143ba7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
> @@ -164,7 +164,7 @@ extern int amdgpu_si_support;
>   extern int amdgpu_cik_support;
>   #endif
>   
> -#define AMDGPU_VM_MAX_NUM_CTX			4096
> +#define AMDGPU_VM_CTX_LIMIT			XA_LIMIT(0, 4095)

IIRC we actually use 0 as a reserved context value in some places.

Christian.

>   #define AMDGPU_SG_THRESHOLD			(256*1024*1024)
>   #define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
>   #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index d85184b5b35c..bddc28b1c9ed 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -248,17 +248,17 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
>   		return -ENOMEM;
>   
>   	mutex_lock(&mgr->lock);
> -	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
> +	r = xa_alloc(&mgr->ctx_handles, id, ctx, AMDGPU_VM_CTX_LIMIT,
> +			GFP_KERNEL);
>   	if (r < 0) {
>   		mutex_unlock(&mgr->lock);
>   		kfree(ctx);
>   		return r;
>   	}
>   
> -	*id = (uint32_t)r;
>   	r = amdgpu_ctx_init(adev, priority, filp, ctx);
>   	if (r) {
> -		idr_remove(&mgr->ctx_handles, *id);
> +		xa_erase(&mgr->ctx_handles, *id);
>   		*id = 0;
>   		kfree(ctx);
>   	}
> @@ -290,7 +290,7 @@ static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
>   	struct amdgpu_ctx *ctx;
>   
>   	mutex_lock(&mgr->lock);
> -	ctx = idr_remove(&mgr->ctx_handles, id);
> +	ctx = xa_erase(&mgr->ctx_handles, id);
>   	if (ctx)
>   		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
>   	mutex_unlock(&mgr->lock);
> @@ -310,7 +310,7 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
>   
>   	mgr = &fpriv->ctx_mgr;
>   	mutex_lock(&mgr->lock);
> -	ctx = idr_find(&mgr->ctx_handles, id);
> +	ctx = xa_load(&mgr->ctx_handles, id);
>   	if (!ctx) {
>   		mutex_unlock(&mgr->lock);
>   		return -EINVAL;
> @@ -345,7 +345,7 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
>   
>   	mgr = &fpriv->ctx_mgr;
>   	mutex_lock(&mgr->lock);
> -	ctx = idr_find(&mgr->ctx_handles, id);
> +	ctx = xa_load(&mgr->ctx_handles, id);
>   	if (!ctx) {
>   		mutex_unlock(&mgr->lock);
>   		return -EINVAL;
> @@ -419,7 +419,7 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
>   	mgr = &fpriv->ctx_mgr;
>   
>   	mutex_lock(&mgr->lock);
> -	ctx = idr_find(&mgr->ctx_handles, id);
> +	ctx = xa_load(&mgr->ctx_handles, id);
>   	if (ctx)
>   		kref_get(&ctx->refcount);
>   	mutex_unlock(&mgr->lock);
> @@ -533,22 +533,18 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
>   void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
>   {
>   	mutex_init(&mgr->lock);
> -	idr_init(&mgr->ctx_handles);
> +	xa_init_flags(&mgr->ctx_handles, XA_FLAGS_ALLOC1);
>   }
>   
>   void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
>   {
>   	unsigned num_entities = amdgput_ctx_total_num_entities();
>   	struct amdgpu_ctx *ctx;
> -	struct idr *idp;
> -	uint32_t id, i;
> +	unsigned long id, i;
>   	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
>   
> -	idp = &mgr->ctx_handles;
> -
>   	mutex_lock(&mgr->lock);
> -	idr_for_each_entry(idp, ctx, id) {
> -
> +	xa_for_each(&mgr->ctx_handles, id, ctx) {
>   		if (!ctx->adev) {
>   			mutex_unlock(&mgr->lock);
>   			return;
> @@ -568,13 +564,9 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
>   {
>   	unsigned num_entities = amdgput_ctx_total_num_entities();
>   	struct amdgpu_ctx *ctx;
> -	struct idr *idp;
> -	uint32_t id, i;
> -
> -	idp = &mgr->ctx_handles;
> -
> -	idr_for_each_entry(idp, ctx, id) {
> +	unsigned long id, i;
>   
> +	xa_for_each(&mgr->ctx_handles, id, ctx) {
>   		if (!ctx->adev)
>   			return;
>   
> @@ -591,18 +583,14 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
>   void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
>   {
>   	struct amdgpu_ctx *ctx;
> -	struct idr *idp;
> -	uint32_t id;
> +	unsigned long id;
>   
>   	amdgpu_ctx_mgr_entity_fini(mgr);
>   
> -	idp = &mgr->ctx_handles;
> -
> -	idr_for_each_entry(idp, ctx, id) {
> +	xa_for_each(&mgr->ctx_handles, id, ctx) {
>   		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
>   			DRM_ERROR("ctx %p is still alive\n", ctx);
>   	}
> -
> -	idr_destroy(&mgr->ctx_handles);
> +	xa_destroy(&mgr->ctx_handles);
>   	mutex_destroy(&mgr->lock);
>   }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> index b3b012c0a7da..011b1f15308a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> @@ -55,7 +55,7 @@ struct amdgpu_ctx_mgr {
>   	struct amdgpu_device	*adev;
>   	struct mutex		lock;
>   	/* protected by lock */
> -	struct idr		ctx_handles;
> +	struct xarray		ctx_handles;
>   };
>   
>   extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM];
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
> index 1cafe8d83a4d..278b4bd98dcc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
> @@ -57,14 +57,14 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
>   	struct drm_file *file;
>   	struct amdgpu_fpriv *fpriv;
>   	struct amdgpu_ctx *ctx;
> -	uint32_t id;
> +	unsigned long id;
>   
>   	if (!filp)
>   		return -EINVAL;
>   
>   	file = filp->private_data;
>   	fpriv = file->driver_priv;
> -	idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
> +	xa_for_each(&fpriv->ctx_mgr.ctx_handles, id, ctx)
>   		amdgpu_ctx_priority_override(ctx, priority);
>   
>   	fput(filp);
Matthew Wilcox (Oracle) Feb. 25, 2019, 4:39 p.m. UTC | #2
On Mon, Feb 25, 2019 at 05:07:24PM +0100, Christian König wrote:
> > -#define AMDGPU_VM_MAX_NUM_CTX			4096
> > +#define AMDGPU_VM_CTX_LIMIT			XA_LIMIT(0, 4095)
> 
> IIRC we actually use 0 as a reserved context value in some places.

That's OK; the ALLOC1 prevents it from using index 0.

You can change it to be XA_LIMIT(1, 4095) if you think that'll be clearer;
it'll be slightly less efficient assembly (a 64-bit mov-immediate instead
of a 32-bit mov-immediate), but it's your driver, so it's up to you.
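
As a standalone sketch of the point about ALLOC1 (not part of the patch; the
names example_xa and example_alloc are invented for illustration), an array
initialised with XA_FLAGS_ALLOC1 never hands out index 0 from xa_alloc(),
even when the limit's lower bound is 0:

#include <linux/xarray.h>

/* Sketch only: an ALLOC1 array, analogous to mgr->ctx_handles in the patch. */
static DEFINE_XARRAY_FLAGS(example_xa, XA_FLAGS_ALLOC1);

static int example_alloc(void *entry, u32 *id)
{
	/*
	 * The limit mirrors AMDGPU_VM_CTX_LIMIT from the patch; because the
	 * array was initialised with XA_FLAGS_ALLOC1, a successful call
	 * stores an ID in the range 1..4095 in *id and never hands out 0.
	 */
	return xa_alloc(&example_xa, id, entry, XA_LIMIT(0, 4095), GFP_KERNEL);
}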

> > @@ -533,22 +533,18 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
> >   void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
> >   {
> >   	mutex_init(&mgr->lock);
> > -	idr_init(&mgr->ctx_handles);
> > +	xa_init_flags(&mgr->ctx_handles, XA_FLAGS_ALLOC1);
> >   }
> >   void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
> >   {
Christian König Feb. 25, 2019, 4:59 p.m. UTC | #3
Am 25.02.19 um 17:39 schrieb Matthew Wilcox:
> On Mon, Feb 25, 2019 at 05:07:24PM +0100, Christian König wrote:
>>> -#define AMDGPU_VM_MAX_NUM_CTX			4096
>>> +#define AMDGPU_VM_CTX_LIMIT			XA_LIMIT(0, 4095)
>> IIRC we actually use 0 as a reserved context value in some places.
> That's OK; the ALLOC1 prevents it from using index 0.
>
> You can change it to be XA_LIMIT(1, 4095) if you think that'll be clearer;
> it'll be slightly less efficient assembly (a 64-bit mov-immediate instead
> of a 32-bit mov-immediate), but it's your driver, so it's up to you.

A code comment should do as well.

But thinking more about it, please DON'T actually change the
AMDGPU_VM_MAX_NUM_CTX define.

That one needs to be used to calculate the reserved GPU space to map the
context save area, and I really don't want to deal with an XA_LIMIT in
that calculation :)

I'm really wondering why that doesn't blow up during compilation...

Christian.
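
One illustrative way to address both concerns — keeping the numeric define
that the reserved-GPU-space calculation needs, while still giving xa_alloc()
an xa_limit — would be to derive the limit from the define. This is a sketch
for discussion, not something posted in this thread:

/* Keep the original define; it still sizes the reserved context save area. */
#define AMDGPU_VM_MAX_NUM_CTX		4096

/* Derived allocation range for xa_alloc(); index 0 remains reserved. */
#define AMDGPU_VM_CTX_LIMIT		XA_LIMIT(1, AMDGPU_VM_MAX_NUM_CTX - 1)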

>
>>> @@ -533,22 +533,18 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
>>>    void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
>>>    {
>>>    	mutex_init(&mgr->lock);
>>> -	idr_init(&mgr->ctx_handles);
>>> +	xa_init_flags(&mgr->ctx_handles, XA_FLAGS_ALLOC1);
>>>    }
>>>    void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
>>>    {
Matthew Wilcox (Oracle) Feb. 25, 2019, 6:47 p.m. UTC | #4
On Mon, Feb 25, 2019 at 04:59:55PM +0000, Koenig, Christian wrote:
> Am 25.02.19 um 17:39 schrieb Matthew Wilcox:
> > On Mon, Feb 25, 2019 at 05:07:24PM +0100, Christian König wrote:
> >>> -#define AMDGPU_VM_MAX_NUM_CTX			4096
> >>> +#define AMDGPU_VM_CTX_LIMIT			XA_LIMIT(0, 4095)
> >> IIRC we actually use 0 as a reserved context value in some places.
> > That's OK; the ALLOC1 prevents it from using index 0.
> >
> > You can change it to be XA_LIMIT(1, 4095) if you think that'll be clearer;
> > it'll be slightly less efficient assembly (a 64-bit mov-immediate instead
> > of a 32-bit mov-immediate), but it's your driver, so it's up to you.
> 
> A code comment should do as well.
> 
> But thinking more about it, please DON'T actually change the
> AMDGPU_VM_MAX_NUM_CTX define.
> 
> That one needs to be used to calculate the reserved GPU space to map the
> context save area, and I really don't want to deal with an XA_LIMIT in
> that calculation :)
> 
> I'm really wondering why that doesn't blow up during compilation...

Maybe that code isn't merged in Linus' tree yet?  I was basing this on
5.0-rc5 and there's no mention of AMDGPU_VM_MAX_NUM_CTX outside these
usages.  If I'd seen another usage, I wouldn't have changed it.

The rebasing I'm going to have to do on -rc1 is going to be epically
challenging.

Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6a704aaa7dbe..c2650f143ba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -164,7 +164,7 @@  extern int amdgpu_si_support;
 extern int amdgpu_cik_support;
 #endif
 
-#define AMDGPU_VM_MAX_NUM_CTX			4096
+#define AMDGPU_VM_CTX_LIMIT			XA_LIMIT(0, 4095)
 #define AMDGPU_SG_THRESHOLD			(256*1024*1024)
 #define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	        3000
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index d85184b5b35c..bddc28b1c9ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -248,17 +248,17 @@  static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 		return -ENOMEM;
 
 	mutex_lock(&mgr->lock);
-	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
+	r = xa_alloc(&mgr->ctx_handles, id, ctx, AMDGPU_VM_CTX_LIMIT,
+			GFP_KERNEL);
 	if (r < 0) {
 		mutex_unlock(&mgr->lock);
 		kfree(ctx);
 		return r;
 	}
 
-	*id = (uint32_t)r;
 	r = amdgpu_ctx_init(adev, priority, filp, ctx);
 	if (r) {
-		idr_remove(&mgr->ctx_handles, *id);
+		xa_erase(&mgr->ctx_handles, *id);
 		*id = 0;
 		kfree(ctx);
 	}
@@ -290,7 +290,7 @@  static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
 	struct amdgpu_ctx *ctx;
 
 	mutex_lock(&mgr->lock);
-	ctx = idr_remove(&mgr->ctx_handles, id);
+	ctx = xa_erase(&mgr->ctx_handles, id);
 	if (ctx)
 		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
 	mutex_unlock(&mgr->lock);
@@ -310,7 +310,7 @@  static int amdgpu_ctx_query(struct amdgpu_device *adev,
 
 	mgr = &fpriv->ctx_mgr;
 	mutex_lock(&mgr->lock);
-	ctx = idr_find(&mgr->ctx_handles, id);
+	ctx = xa_load(&mgr->ctx_handles, id);
 	if (!ctx) {
 		mutex_unlock(&mgr->lock);
 		return -EINVAL;
@@ -345,7 +345,7 @@  static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 
 	mgr = &fpriv->ctx_mgr;
 	mutex_lock(&mgr->lock);
-	ctx = idr_find(&mgr->ctx_handles, id);
+	ctx = xa_load(&mgr->ctx_handles, id);
 	if (!ctx) {
 		mutex_unlock(&mgr->lock);
 		return -EINVAL;
@@ -419,7 +419,7 @@  struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
 	mgr = &fpriv->ctx_mgr;
 
 	mutex_lock(&mgr->lock);
-	ctx = idr_find(&mgr->ctx_handles, id);
+	ctx = xa_load(&mgr->ctx_handles, id);
 	if (ctx)
 		kref_get(&ctx->refcount);
 	mutex_unlock(&mgr->lock);
@@ -533,22 +533,18 @@  int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 {
 	mutex_init(&mgr->lock);
-	idr_init(&mgr->ctx_handles);
+	xa_init_flags(&mgr->ctx_handles, XA_FLAGS_ALLOC1);
 }
 
 void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
 {
 	unsigned num_entities = amdgput_ctx_total_num_entities();
 	struct amdgpu_ctx *ctx;
-	struct idr *idp;
-	uint32_t id, i;
+	unsigned long id, i;
 	long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
 
-	idp = &mgr->ctx_handles;
-
 	mutex_lock(&mgr->lock);
-	idr_for_each_entry(idp, ctx, id) {
-
+	xa_for_each(&mgr->ctx_handles, id, ctx) {
 		if (!ctx->adev) {
 			mutex_unlock(&mgr->lock);
 			return;
@@ -568,13 +564,9 @@  void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 {
 	unsigned num_entities = amdgput_ctx_total_num_entities();
 	struct amdgpu_ctx *ctx;
-	struct idr *idp;
-	uint32_t id, i;
-
-	idp = &mgr->ctx_handles;
-
-	idr_for_each_entry(idp, ctx, id) {
+	unsigned long id, i;
 
+	xa_for_each(&mgr->ctx_handles, id, ctx) {
 		if (!ctx->adev)
 			return;
 
@@ -591,18 +583,14 @@  void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 {
 	struct amdgpu_ctx *ctx;
-	struct idr *idp;
-	uint32_t id;
+	unsigned long id;
 
 	amdgpu_ctx_mgr_entity_fini(mgr);
 
-	idp = &mgr->ctx_handles;
-
-	idr_for_each_entry(idp, ctx, id) {
+	xa_for_each(&mgr->ctx_handles, id, ctx) {
 		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
 			DRM_ERROR("ctx %p is still alive\n", ctx);
 	}
-
-	idr_destroy(&mgr->ctx_handles);
+	xa_destroy(&mgr->ctx_handles);
 	mutex_destroy(&mgr->lock);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index b3b012c0a7da..011b1f15308a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -55,7 +55,7 @@  struct amdgpu_ctx_mgr {
 	struct amdgpu_device	*adev;
 	struct mutex		lock;
 	/* protected by lock */
-	struct idr		ctx_handles;
+	struct xarray		ctx_handles;
 };
 
 extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 1cafe8d83a4d..278b4bd98dcc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -57,14 +57,14 @@  static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
 	struct drm_file *file;
 	struct amdgpu_fpriv *fpriv;
 	struct amdgpu_ctx *ctx;
-	uint32_t id;
+	unsigned long id;
 
 	if (!filp)
 		return -EINVAL;
 
 	file = filp->private_data;
 	fpriv = file->driver_priv;
-	idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+	xa_for_each(&fpriv->ctx_mgr.ctx_handles, id, ctx)
 		amdgpu_ctx_priority_override(ctx, priority);
 
 	fput(filp);