diff mbox

[RFC,2/2] drm/i915: Clean-up PPGTT on context destruction

Message ID 1423771502-9766-3-git-send-email-rafael.barbalho@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

rafael.barbalho@intel.com Feb. 12, 2015, 8:05 p.m. UTC
From: Rafael Barbalho <rafael.barbalho@intel.com>

With full PPGTT enabled an object's VMA entry into a PPGTT VM needs to be
cleaned up so that the PPGTT PDE & PTE allocations can be freed.

This problem only shows up with full PPGTT because an object's VMA is
only cleaned-up when the object is destroyed. However, if the object has
been shared between multiple processes this may not happen, which leads to
references to the PPGTT still being kept after the object was shared.

Under android the sharing of GEM objects is a fairly common operation, thus
the clean-up has to be more aggressive.

Signed-off-by: Rafael Barbalho <rafael.barbalho@intel.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c         |  7 +++---
 drivers/gpu/drm/i915/i915_gem_context.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c     | 43 ++++++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem_gtt.h     |  7 ++++++
 4 files changed, 54 insertions(+), 5 deletions(-)

Comments

Chris Wilson Feb. 12, 2015, 9:03 p.m. UTC | #1
On Thu, Feb 12, 2015 at 08:05:02PM +0000, rafael.barbalho@intel.com wrote:
> From: Rafael Barbalho <rafael.barbalho@intel.com>
> 
> With full PPGTT enabled an object's VMA entry into a PPGTT VM needs to be
> cleaned up so that the PPGTT PDE & PTE allocations can be freed.
> 
> This problem only shows up with full PPGTT because an object's VMA is
> only cleaned-up when the object is destroyed. However, if the object has
> been shared between multiple processes this may not happen, which leads to
> references to the PPGTT still being kept the object was shared.
> 
> Under android the sharing of GEM objects is a fairly common operation, thus
> the clean-up has to be more agressive.

Not quite. You need an active refcount as we do not expect close(fd) to
stall. The trick is to "simply" use requests to retire vma (as well as
the object management it does today, though that just becomes a second
layer for GEM API management, everything else goes through vma).
-Chris
Daniel Vetter Feb. 13, 2015, 9:51 a.m. UTC | #2
On Thu, Feb 12, 2015 at 08:05:02PM +0000, rafael.barbalho@intel.com wrote:
> From: Rafael Barbalho <rafael.barbalho@intel.com>
> 
> With full PPGTT enabled an object's VMA entry into a PPGTT VM needs to be
> cleaned up so that the PPGTT PDE & PTE allocations can be freed.
> 
> This problem only shows up with full PPGTT because an object's VMA is
> only cleaned-up when the object is destroyed. However, if the object has
> been shared between multiple processes this may not happen, which leads to
> references to the PPGTT still being kept the object was shared.
> 
> Under android the sharing of GEM objects is a fairly common operation, thus
> the clean-up has to be more agressive.
> 
> Signed-off-by: Rafael Barbalho <rafael.barbalho@intel.com>
> Cc: Daniel Vetter <daniel@ffwll.ch>
> Cc: Jon Bloomfield <jon.bloomfield@intel.com>

So when we've merged this we iirc talked about this issue and decided that
the shrinker should be good enough in cleaning up the crap from shared
objects. Not a pretty solution, but it should have worked.

Is this again the lowmemory killer wreaking havoc with our i915 shrinker,
or is there something else going on? And do you have some igt testcase for
this? If sharing is all that's required the following should do the trick:
1. allocate obj
2. create new context
3. do dummy execbuf with that obj to map it into the ppgtt
4. free context
5. goto 2 often enough to OOM

The shrinker should eventually kick in and clean up, but maybe I'm wrong
about that ...

One thing I've thought of is that if the shared object is pinned as a
scanout buffer then the shrinker won't touch it. But not sure whether you
can actually do this to end up with a stable leak - after each pageflip
the shrinker should eventually evict the vma/obj for this.

> ---
>  drivers/gpu/drm/i915/i915_gem.c         |  7 +++---
>  drivers/gpu/drm/i915/i915_gem_context.c |  2 +-
>  drivers/gpu/drm/i915/i915_gem_gtt.c     | 43 ++++++++++++++++++++++++++++++++-
>  drivers/gpu/drm/i915/i915_gem_gtt.h     |  7 ++++++
>  4 files changed, 54 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 60b8bd1..e509d89 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -4529,11 +4529,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
>  		return;
>  
>  	vm = vma->vm;
> +	list_del(&vma->vma_link);
>  
> -	if (!i915_is_ggtt(vm))
> +	if (!i915_is_ggtt(vm)) {
> +		list_del(&vma->vm_link);
>  		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
> -
> -	list_del(&vma->vma_link);
> +	}
>  
>  	kfree(vma);
>  }
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index a5221d8..4319a93 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -140,7 +140,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
>  	if (i915.enable_execlists)
>  		intel_lr_context_free(ctx);
>  
> -	i915_ppgtt_put(ctx->ppgtt);
> +	i915_ppgtt_destroy(ctx->ppgtt);
>  
>  	if (ctx->legacy_hw_ctx.rcs_state)
>  		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 6f410cf..9ef2f67 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -1097,6 +1097,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
>  	else
>  		BUG();
>  }
> +
>  int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
>  {
>  	struct drm_i915_private *dev_priv = dev->dev_private;
> @@ -1108,6 +1109,8 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
>  		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
>  			    ppgtt->base.total);
>  		i915_init_vm(dev_priv, &ppgtt->base);
> +
> +		INIT_LIST_HEAD(&ppgtt->vma_list);
>  	}
>  
>  	return ret;
> @@ -1177,14 +1180,49 @@ void  i915_ppgtt_release(struct kref *kref)
>  	/* vmas should already be unbound */
>  	WARN_ON(!list_empty(&ppgtt->base.active_list));
>  	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
> +	WARN_ON(!list_empty(&ppgtt->vma_list));
>  
>  	list_del(&ppgtt->base.global_link);
>  	drm_mm_takedown(&ppgtt->base.mm);
>  
>  	ppgtt->base.cleanup(&ppgtt->base);
> +
>  	kfree(ppgtt);
>  }
>  
> +void
> +i915_ppgtt_destroy(struct i915_hw_ppgtt *ppgtt)

This is misnamed since what it really does is unbind everything and then
drop the reference. Unbinding everything is already implemented as
i915_gem_evict_vm.

Also unconditionally evicting the entire vm upon context destruction means
we essentially assume a 1:1 link between ctx and ppgtt. Which is currently
true but will change with the interfaces planned for buffered svm.

I think we need a new ctx_use_count besides the pointer refcount that we
inc/dec on context destruction. And then we only call evict_vm when that
drops to 0.

> +{
> +	struct i915_vma *vma, *tmp;
> +	struct i915_address_space *vm;
> +	int ret;
> +
> +	if (!ppgtt)
> +		return;
> +
> +	vm = &ppgtt->base;
> +
> +	/*
> +	 * If this fires it means that the context reference counting went
> +	 * awry.
> +	 */
> +	WARN_ON(!list_empty(&ppgtt->base.active_list));
> +
> +	if (!list_empty(&ppgtt->vma_list))
> +		list_for_each_entry_safe(vma, tmp, &ppgtt->vma_list, vm_link) {
> +			WARN_ON(vma->pin_count != 0);
> +			/*
> +			 * The object should be inactive at this point, thus
> +			 * its pin_count should be 0. We will zero it anyway
> +			 * make sure that the unbind call succeeds.
> +			 */
> +			vma->pin_count = 0;
> +			ret = i915_vma_unbind(vma);
> +		}
> +
> +	i915_ppgtt_put(ppgtt);

Imo this should be moved back into the ctx destruction code, hiding it
like this makes auditing the refcounting a lot harder. Especially if the
wrapping function doesn't use the magic get/put words used for
refcounting.

Cheers, Daniel

> +}
> +
>  static void
>  ppgtt_bind_vma(struct i915_vma *vma,
>  	       enum i915_cache_level cache_level,
> @@ -2109,6 +2147,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
>  		return ERR_PTR(-ENOMEM);
>  
>  	INIT_LIST_HEAD(&vma->vma_link);
> +	INIT_LIST_HEAD(&vma->vm_link);

We have that already, right below ;-)

Cheers, Daniel

>  	INIT_LIST_HEAD(&vma->mm_list);
>  	INIT_LIST_HEAD(&vma->exec_list);
>  	vma->vm = vm;
> @@ -2142,8 +2181,10 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
>  	if (i915_is_ggtt(vm))
>  		list_add(&vma->vma_link, &obj->vma_list);
>  	else {
> +		struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
>  		list_add_tail(&vma->vma_link, &obj->vma_list);
> -		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
> +		i915_ppgtt_get(ppgtt);
> +		list_add_tail(&vma->vm_link, &ppgtt->vma_list);
>  	}
>  
>  	return vma;
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index d5c14af..323bbc6 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -128,6 +128,8 @@ struct i915_vma {
>  
>  	struct list_head vma_link; /* Link in the object's VMA list */
>  
> +	struct list_head vm_link; /* Link in the VM's VMA list */
> +
>  	/** This vma's place in the batchbuffer or on the eviction list */
>  	struct list_head exec_list;
>  
> @@ -149,6 +151,7 @@ struct i915_vma {
>  	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
>  	 * bits with absolutely no headroom. So use 4 bits. */
>  	unsigned int pin_count:4;
> +
>  #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
>  
>  	/** Unmap an object from an address space. This usually consists of
> @@ -262,6 +265,8 @@ struct i915_hw_ppgtt {
>  
>  	struct drm_i915_file_private *file_priv;
>  
> +	struct list_head vma_list;
> +
>  	int (*enable)(struct i915_hw_ppgtt *ppgtt);
>  	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
>  			 struct intel_engine_cs *ring);
> @@ -280,6 +285,8 @@ int i915_ppgtt_init_hw(struct drm_device *dev);
>  void i915_ppgtt_release(struct kref *kref);
>  struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
>  					struct drm_i915_file_private *fpriv);
> +void i915_ppgtt_destroy(struct i915_hw_ppgtt *ppgtt);
> +
>  static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
>  {
>  	if (ppgtt)
> -- 
> 2.3.0
>
Daniel Vetter Feb. 13, 2015, 9:55 a.m. UTC | #3
On Thu, Feb 12, 2015 at 09:03:06PM +0000, Chris Wilson wrote:
> On Thu, Feb 12, 2015 at 08:05:02PM +0000, rafael.barbalho@intel.com wrote:
> > From: Rafael Barbalho <rafael.barbalho@intel.com>
> > 
> > With full PPGTT enabled an object's VMA entry into a PPGTT VM needs to be
> > cleaned up so that the PPGTT PDE & PTE allocations can be freed.
> > 
> > This problem only shows up with full PPGTT because an object's VMA is
> > only cleaned-up when the object is destroyed. However, if the object has
> > been shared between multiple processes this may not happen, which leads to
> > references to the PPGTT still being kept the object was shared.
> > 
> > Under android the sharing of GEM objects is a fairly common operation, thus
> > the clean-up has to be more agressive.
> 
> Not quite. You need an active refcount as we do not expect close(fd) to
> stall. The trick is to "simply" use requests to retire vma (as well as
> the object management it does today, though that just becomes a second
> layer for GEM API management, everything else goes through vma).

Linking into the ctx unref should give us that for free since requests do
hold a reference on the context. So this will only be run when the buffers
are idle.

Well except that our unbind code is too dense to do that correctly for
shared buffers, so we need to move obj->active to vma->active first.

And yeah the commit message should have explained this.
-Daniel
Chris Wilson Feb. 13, 2015, 10:05 a.m. UTC | #4
On Fri, Feb 13, 2015 at 10:51:36AM +0100, Daniel Vetter wrote:
> On Thu, Feb 12, 2015 at 08:05:02PM +0000, rafael.barbalho@intel.com wrote:
> > From: Rafael Barbalho <rafael.barbalho@intel.com>
> > 
> > With full PPGTT enabled an object's VMA entry into a PPGTT VM needs to be
> > cleaned up so that the PPGTT PDE & PTE allocations can be freed.
> > 
> > This problem only shows up with full PPGTT because an object's VMA is
> > only cleaned-up when the object is destroyed. However, if the object has
> > been shared between multiple processes this may not happen, which leads to
> > references to the PPGTT still being kept the object was shared.
> > 
> > Under android the sharing of GEM objects is a fairly common operation, thus
> > the clean-up has to be more agressive.
> > 
> > Signed-off-by: Rafael Barbalho <rafael.barbalho@intel.com>
> > Cc: Daniel Vetter <daniel@ffwll.ch>
> > Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> 
> So when we've merged this we iirc talked about this issue and decided that
> the shrinker should be good enough in cleaning up the crap from shared
> objects. Not a pretty solution, but it should have worked.
> 
> Is this again the lowmemory killer wreaking havoc with our i915 shrinker,
> or is there something else going on? And do you have some igt testcase for
> this? If sharing is all that's required the following should do the trick:
> 1. allocate obj
> 2. create new context
> 3. do dummy execbuf with that obj to map it into the ppgtt
> 4. free context
> 5. goto 2 often enough to OOM

You know I have patches to fix all of this... It just happens to fall
out of tracking vma in requests, and by extension vm.
-Chris
Chris Wilson Feb. 13, 2015, 10:34 a.m. UTC | #5
On Fri, Feb 13, 2015 at 10:55:46AM +0100, Daniel Vetter wrote:
> On Thu, Feb 12, 2015 at 09:03:06PM +0000, Chris Wilson wrote:
> > On Thu, Feb 12, 2015 at 08:05:02PM +0000, rafael.barbalho@intel.com wrote:
> > > From: Rafael Barbalho <rafael.barbalho@intel.com>
> > > 
> > > With full PPGTT enabled an object's VMA entry into a PPGTT VM needs to be
> > > cleaned up so that the PPGTT PDE & PTE allocations can be freed.
> > > 
> > > This problem only shows up with full PPGTT because an object's VMA is
> > > only cleaned-up when the object is destroyed. However, if the object has
> > > been shared between multiple processes this may not happen, which leads to
> > > references to the PPGTT still being kept the object was shared.
> > > 
> > > Under android the sharing of GEM objects is a fairly common operation, thus
> > > the clean-up has to be more agressive.
> > 
> > Not quite. You need an active refcount as we do not expect close(fd) to
> > stall. The trick is to "simply" use requests to retire vma (as well as
> > the object management it does today, though that just becomes a second
> > layer for GEM API management, everything else goes through vma).
> 
> Linking into the ctx unref should give us that for free since requests do
> hold a reference on the context. So this will only be run when the buffers
> are idle.
> 
> Well except that our unbind code is too dense to do that correctly for
> shared buffers, so we need to move obj->active to vma->active first.

We unbind vma, so what do you mean?

This is how I foresee the code:

/*
 * Illustrative sketch from the review discussion (not a tree-ready patch):
 * per-fd context teardown that also releases every vma still bound into the
 * context's full-PPGTT VM, so the PPGTT PDE/PTE allocations can be freed
 * instead of lingering until the (possibly shared) objects are destroyed.
 *
 * NOTE(review): relies on vma infrastructure that does not exist at this
 * point in the tree -- i915_vma_put(), vma->vm_link/obj_link, and the
 * vm->closed flag all presume the vma-tracking rework lands first. Confirm
 * against that series before taking this shape literally.
 */
static int context_idr_cleanup(int id, void *p, void *data)
{
        struct intel_context *ctx = p;

        /* Only non-default contexts with their own full PPGTT need VM teardown */
        if (ctx->ppgtt && !i915_gem_context_is_default(ctx)) {
                struct list_head *list;
                struct i915_vma *vma;

                /* Decouple the remaining vma to keep the next lookup fast */
                list = &ctx->ppgtt->base.vma_list;
                while (!list_empty(list)) {
                        vma = list_first_entry(list, typeof(*vma), vm_link);
                        list_del_init(&vma->vm_link);
                        list_del_init(&vma->obj_link);
                        i915_vma_put(vma);
                }

                /* Drop active references to this vm upon retire */
                ctx->ppgtt->base.closed = true;

                /* Drop all inactive references (via vma->vm reference) */
                list = &ctx->ppgtt->base.inactive_list;
                while (!list_empty(list)) {
                        struct drm_i915_gem_object *obj;
                        int ret;

                        vma = list_first_entry(list, typeof(*vma), mm_list);
                        obj = vma->obj;

                        /*
                         * Hold an object reference across the unbind: the
                         * unbind may drop the last vma reference keeping the
                         * object alive, and the object must outlive the call.
                         */
                        drm_gem_object_reference(&obj->base);
                        ret = i915_vma_unbind(vma);
                        drm_gem_object_unreference(&obj->base);
                        /* A failed unbind would loop forever on the same vma */
                        if (WARN_ON(ret))
                                break;
                }
        }

        ctx->file_priv = NULL;
        i915_gem_context_unreference(ctx);

        return 0;
}
-Chris
Daniel Vetter Feb. 23, 2015, 4:41 p.m. UTC | #6
On Fri, Feb 13, 2015 at 10:34:51AM +0000, Chris Wilson wrote:
> On Fri, Feb 13, 2015 at 10:55:46AM +0100, Daniel Vetter wrote:
> > On Thu, Feb 12, 2015 at 09:03:06PM +0000, Chris Wilson wrote:
> > > On Thu, Feb 12, 2015 at 08:05:02PM +0000, rafael.barbalho@intel.com wrote:
> > > > From: Rafael Barbalho <rafael.barbalho@intel.com>
> > > > 
> > > > With full PPGTT enabled an object's VMA entry into a PPGTT VM needs to be
> > > > cleaned up so that the PPGTT PDE & PTE allocations can be freed.
> > > > 
> > > > This problem only shows up with full PPGTT because an object's VMA is
> > > > only cleaned-up when the object is destroyed. However, if the object has
> > > > been shared between multiple processes this may not happen, which leads to
> > > > references to the PPGTT still being kept the object was shared.
> > > > 
> > > > Under android the sharing of GEM objects is a fairly common operation, thus
> > > > the clean-up has to be more agressive.
> > > 
> > > Not quite. You need an active refcount as we do not expect close(fd) to
> > > stall. The trick is to "simply" use requests to retire vma (as well as
> > > the object management it does today, though that just becomes a second
> > > layer for GEM API management, everything else goes through vma).
> > 
> > Linking into the ctx unref should give us that for free since requests do
> > hold a reference on the context. So this will only be run when the buffers
> > are idle.
> > 
> > Well except that our unbind code is too dense to do that correctly for
> > shared buffers, so we need to move obj->active to vma->active first.
> 
> We unbind vma, so what do you mean?

The unbind of the vma will block since we track active per-obj instead of
per-vma. Which is kinda not that cool for a kref_put cleanup function.

But yeah the below is what I had in mind too, with the mentioned nuisance
fixed.
-Daniel

> 
> This is how I forsee the code:
> 
> static int context_idr_cleanup(int id, void *p, void *data)
> {
>         struct intel_context *ctx = p;
> 
>         if (ctx->ppgtt && !i915_gem_context_is_default(ctx)) {
>                 struct list_head *list;
>                 struct i915_vma *vma;
> 
>                 /* Decouple the remaining vma to keep the next lookup fast */
>                 list = &ctx->ppgtt->base.vma_list;
>                 while (!list_empty(list)) {
>                         vma = list_first_entry(list, typeof(*vma), vm_link);
>                         list_del_init(&vma->vm_link);
>                         list_del_init(&vma->obj_link);
>                         i915_vma_put(vma);
>                 }
> 
>                 /* Drop active references to this vm upon retire */
>                 ctx->ppgtt->base.closed = true;
> 
>                 /* Drop all inactive references (via vma->vm reference) */
>                 list = &ctx->ppgtt->base.inactive_list;
>                 while (!list_empty(list)) {
>                         struct drm_i915_gem_object *obj;
>                         int ret;
> 
>                         vma = list_first_entry(list, typeof(*vma), mm_list);
>                         obj = vma->obj;
> 
>                         drm_gem_object_reference(&obj->base);
>                         ret = i915_vma_unbind(vma);
>                         drm_gem_object_unreference(&obj->base);
>                         if (WARN_ON(ret))
>                                 break;
>                 }
>         }
> 
>         ctx->file_priv = NULL;
>         i915_gem_context_unreference(ctx);
> 
>         return 0;
> }
> -Chris
> 
> -- 
> Chris Wilson, Intel Open Source Technology Centre
Daniel Vetter Feb. 23, 2015, 4:43 p.m. UTC | #7
On Fri, Feb 13, 2015 at 10:05:16AM +0000, Chris Wilson wrote:
> On Fri, Feb 13, 2015 at 10:51:36AM +0100, Daniel Vetter wrote:
> > On Thu, Feb 12, 2015 at 08:05:02PM +0000, rafael.barbalho@intel.com wrote:
> > > From: Rafael Barbalho <rafael.barbalho@intel.com>
> > > 
> > > With full PPGTT enabled an object's VMA entry into a PPGTT VM needs to be
> > > cleaned up so that the PPGTT PDE & PTE allocations can be freed.
> > > 
> > > This problem only shows up with full PPGTT because an object's VMA is
> > > only cleaned-up when the object is destroyed. However, if the object has
> > > been shared between multiple processes this may not happen, which leads to
> > > references to the PPGTT still being kept the object was shared.
> > > 
> > > Under android the sharing of GEM objects is a fairly common operation, thus
> > > the clean-up has to be more agressive.
> > > 
> > > Signed-off-by: Rafael Barbalho <rafael.barbalho@intel.com>
> > > Cc: Daniel Vetter <daniel@ffwll.ch>
> > > Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> > 
> > So when we've merged this we iirc talked about this issue and decided that
> > the shrinker should be good enough in cleaning up the crap from shared
> > objects. Not a pretty solution, but it should have worked.
> > 
> > Is this again the lowmemory killer wreaking havoc with our i915 shrinker,
> > or is there something else going on? And do you have some igt testcase for
> > this? If sharing is all that's required the following should do the trick:
> > 1. allocate obj
> > 2. create new context
> > 3. do dummy execbuf with that obj to map it into the ppgtt
> > 4. free context
> > 5. goto 2 often enough to OOM
> 
> You know I have patches to fix all of this... It just happens to fall
> out of tracking vma in requests, and by extension vm.

Since you replied to my description of the igt testcase ... What kind of
bugs do you mean? I kinda can't find the context here ...
-Daniel
Chris Wilson Feb. 23, 2015, 4:50 p.m. UTC | #8
On Mon, Feb 23, 2015 at 05:41:51PM +0100, Daniel Vetter wrote:
> On Fri, Feb 13, 2015 at 10:34:51AM +0000, Chris Wilson wrote:
> > On Fri, Feb 13, 2015 at 10:55:46AM +0100, Daniel Vetter wrote:
> > > Well except that our unbind code is too dense to do that correctly for
> > > shared buffers, so we need to move obj->active to vma->active first.
> > 
> > We unbind vma, so what do you mean?
> 
> The unbind of the vma will block since we track active per-obj instead of
> per-vma. Which is kinda not that cool for a kref_put cleanup function.

As I have said and shown, that is very easy to rectify and has, for
example, nice side-effects of basically making operating on fences free
of blocking on the GPU if the object is busy only in the ppgtt. The
principle use is so that we are consistent in vma vs obj tracking and
make the code much cleaner imo.
-Chris
rafael.barbalho@intel.com Feb. 23, 2015, 4:52 p.m. UTC | #9
> -----Original Message-----
> From: Chris Wilson [mailto:chris@chris-wilson.co.uk]
> Sent: Monday, February 23, 2015 4:50 PM
> To: Daniel Vetter
> Cc: Barbalho, Rafael; intel-gfx@lists.freedesktop.org
> Subject: Re: [Intel-gfx] [RFC 2/2] drm/i915: Clean-up PPGTT on context
> destruction
> 
> On Mon, Feb 23, 2015 at 05:41:51PM +0100, Daniel Vetter wrote:
> > On Fri, Feb 13, 2015 at 10:34:51AM +0000, Chris Wilson wrote:
> > > On Fri, Feb 13, 2015 at 10:55:46AM +0100, Daniel Vetter wrote:
> > > > Well except that our unbind code is too dense to do that correctly for
> > > > shared buffers, so we need to move obj->active to vma->active first.
> > >
> > > We unbind vma, so what do you mean?
> >
> > The unbind of the vma will block since we track active per-obj instead of
> > per-vma. Which is kinda not that cool for a kref_put cleanup function.
> 
> As I have said and shown, that is very easy to rectify and has, for
> example, nice side-effects of basically making operating on fences free
> of blocking on the GPU if the object is busy only in the ppgtt. The
> principle use is so that we are consistent in vma vs obj tracking and
> make the code much cleaner imo.

My initial implementation was tracking the vma in the request structure but it
wasn't as clean. I'll take everything that has been said on-board and clean
things up, and add an igt test too while I am at it.

> 
> --
> Chris Wilson, Intel Open Source Technology Centre
Daniel Vetter Feb. 23, 2015, 11:44 p.m. UTC | #10
On Mon, Feb 23, 2015 at 04:52:46PM +0000, Barbalho, Rafael wrote:
> 
> 
> > -----Original Message-----
> > From: Chris Wilson [mailto:chris@chris-wilson.co.uk]
> > Sent: Monday, February 23, 2015 4:50 PM
> > To: Daniel Vetter
> > Cc: Barbalho, Rafael; intel-gfx@lists.freedesktop.org
> > Subject: Re: [Intel-gfx] [RFC 2/2] drm/i915: Clean-up PPGTT on context
> > destruction
> > 
> > On Mon, Feb 23, 2015 at 05:41:51PM +0100, Daniel Vetter wrote:
> > > On Fri, Feb 13, 2015 at 10:34:51AM +0000, Chris Wilson wrote:
> > > > On Fri, Feb 13, 2015 at 10:55:46AM +0100, Daniel Vetter wrote:
> > > > > Well except that our unbind code is too dense to do that correctly for
> > > > > shared buffers, so we need to move obj->active to vma->active first.
> > > >
> > > > We unbind vma, so what do you mean?
> > >
> > > The unbind of the vma will block since we track active per-obj instead of
> > > per-vma. Which is kinda not that cool for a kref_put cleanup function.
> > 
> > As I have said and shown, that is very easy to rectify and has, for
> > example, nice side-effects of basically making operating on fences free
> > of blocking on the GPU if the object is busy only in the ppgtt. The
> > principle use is so that we are consistent in vma vs obj tracking and
> > make the code much cleaner imo.
> 
> My initial implementation was tracking the vma in the request structure but it
> wasn't as clean. I'll take everything that has been said on-board and clean
> things up, and add an igt test too while I am at it.

Conceptually I guess we could split up the active list into something
per-request, at least right now. But I think that will totally upset the
scheduler work, so just moving obj->active to vmas should be a lot less
intrusive indeed.
-Daniel
Daniel Vetter Feb. 25, 2015, 11:17 p.m. UTC | #11
On Mon, Feb 23, 2015 at 05:41:51PM +0100, Daniel Vetter wrote:
> On Fri, Feb 13, 2015 at 10:34:51AM +0000, Chris Wilson wrote:
> > On Fri, Feb 13, 2015 at 10:55:46AM +0100, Daniel Vetter wrote:
> > > On Thu, Feb 12, 2015 at 09:03:06PM +0000, Chris Wilson wrote:
> > > > On Thu, Feb 12, 2015 at 08:05:02PM +0000, rafael.barbalho@intel.com wrote:
> > > > > From: Rafael Barbalho <rafael.barbalho@intel.com>
> > > > > 
> > > > > With full PPGTT enabled an object's VMA entry into a PPGTT VM needs to be
> > > > > cleaned up so that the PPGTT PDE & PTE allocations can be freed.
> > > > > 
> > > > > This problem only shows up with full PPGTT because an object's VMA is
> > > > > only cleaned-up when the object is destroyed. However, if the object has
> > > > > been shared between multiple processes this may not happen, which leads to
> > > > > references to the PPGTT still being kept the object was shared.
> > > > > 
> > > > > Under android the sharing of GEM objects is a fairly common operation, thus
> > > > > the clean-up has to be more agressive.
> > > > 
> > > > Not quite. You need an active refcount as we do not expect close(fd) to
> > > > stall. The trick is to "simply" use requests to retire vma (as well as
> > > > the object management it does today, though that just becomes a second
> > > > layer for GEM API management, everything else goes through vma).
> > > 
> > > Linking into the ctx unref should give us that for free since requests do
> > > hold a reference on the context. So this will only be run when the buffers
> > > are idle.
> > > 
> > > Well except that our unbind code is too dense to do that correctly for
> > > shared buffers, so we need to move obj->active to vma->active first.
> > 
> > We unbind vma, so what do you mean?
> 
> The unbind of the vma will block since we track active per-obj instead of
> per-vma. Which is kinda not that cool for a kref_put cleanup function.
> 
> But yeah the below is what I had in mind too, with the mentioned nuisance
> fixed.

Oh and there's a fun bit of code to clean up - in the execbuf code we just
pin the 2nd vma for the ggtt, which crucially relies on the ->active
boolean being shared by all vmas. That needs to be fixed first before we
can move obj->active to vma->active. For context see

commit da51a1e7e398129d9fddd4b26b8469145dd4fd08
Author: Daniel Vetter <daniel.vetter@ffwll.ch>
Date:   Mon Aug 11 12:08:58 2014 +0200

    drm/i915: Fix secure dispatch with full ppgtt

Cheers, Daniel
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 60b8bd1..e509d89 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4529,11 +4529,12 @@  void i915_gem_vma_destroy(struct i915_vma *vma)
 		return;
 
 	vm = vma->vm;
+	list_del(&vma->vma_link);
 
-	if (!i915_is_ggtt(vm))
+	if (!i915_is_ggtt(vm)) {
+		list_del(&vma->vm_link);
 		i915_ppgtt_put(i915_vm_to_ppgtt(vm));
-
-	list_del(&vma->vma_link);
+	}
 
 	kfree(vma);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5221d8..4319a93 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -140,7 +140,7 @@  void i915_gem_context_free(struct kref *ctx_ref)
 	if (i915.enable_execlists)
 		intel_lr_context_free(ctx);
 
-	i915_ppgtt_put(ctx->ppgtt);
+	i915_ppgtt_destroy(ctx->ppgtt);
 
 	if (ctx->legacy_hw_ctx.rcs_state)
 		drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6f410cf..9ef2f67 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1097,6 +1097,7 @@  static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 	else
 		BUG();
 }
+
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1108,6 +1109,8 @@  int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
 			    ppgtt->base.total);
 		i915_init_vm(dev_priv, &ppgtt->base);
+
+		INIT_LIST_HEAD(&ppgtt->vma_list);
 	}
 
 	return ret;
@@ -1177,14 +1180,49 @@  void  i915_ppgtt_release(struct kref *kref)
 	/* vmas should already be unbound */
 	WARN_ON(!list_empty(&ppgtt->base.active_list));
 	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
+	WARN_ON(!list_empty(&ppgtt->vma_list));
 
 	list_del(&ppgtt->base.global_link);
 	drm_mm_takedown(&ppgtt->base.mm);
 
 	ppgtt->base.cleanup(&ppgtt->base);
+
 	kfree(ppgtt);
 }
 
+void
+i915_ppgtt_destroy(struct i915_hw_ppgtt *ppgtt)
+{
+	struct i915_vma *vma, *tmp;
+	struct i915_address_space *vm;
+	int ret;
+
+	if (!ppgtt)
+		return;
+
+	vm = &ppgtt->base;
+
+	/*
+	 * If this fires it means that the context reference counting went
+	 * awry.
+	 */
+	WARN_ON(!list_empty(&ppgtt->base.active_list));
+
+	if (!list_empty(&ppgtt->vma_list))
+		list_for_each_entry_safe(vma, tmp, &ppgtt->vma_list, vm_link) {
+			WARN_ON(vma->pin_count != 0);
+			/*
+			 * The object should be inactive at this point, thus
+			 * its pin_count should be 0. We will zero it anyway
+			 * make sure that the unbind call succeeds.
+			 */
+			vma->pin_count = 0;
+			ret = i915_vma_unbind(vma);
+		}
+
+	i915_ppgtt_put(ppgtt);
+}
+
 static void
 ppgtt_bind_vma(struct i915_vma *vma,
 	       enum i915_cache_level cache_level,
@@ -2109,6 +2147,7 @@  static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&vma->vma_link);
+	INIT_LIST_HEAD(&vma->vm_link);
 	INIT_LIST_HEAD(&vma->mm_list);
 	INIT_LIST_HEAD(&vma->exec_list);
 	vma->vm = vm;
@@ -2142,8 +2181,10 @@  static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	if (i915_is_ggtt(vm))
 		list_add(&vma->vma_link, &obj->vma_list);
 	else {
+		struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 		list_add_tail(&vma->vma_link, &obj->vma_list);
-		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+		i915_ppgtt_get(ppgtt);
+		list_add_tail(&vma->vm_link, &ppgtt->vma_list);
 	}
 
 	return vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d5c14af..323bbc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -128,6 +128,8 @@  struct i915_vma {
 
 	struct list_head vma_link; /* Link in the object's VMA list */
 
+	struct list_head vm_link; /* Link in the VM's VMA list */
+
 	/** This vma's place in the batchbuffer or on the eviction list */
 	struct list_head exec_list;
 
@@ -149,6 +151,7 @@  struct i915_vma {
 	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
 	 * bits with absolutely no headroom. So use 4 bits. */
 	unsigned int pin_count:4;
+
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
 	/** Unmap an object from an address space. This usually consists of
@@ -262,6 +265,8 @@  struct i915_hw_ppgtt {
 
 	struct drm_i915_file_private *file_priv;
 
+	struct list_head vma_list;
+
 	int (*enable)(struct i915_hw_ppgtt *ppgtt);
 	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
 			 struct intel_engine_cs *ring);
@@ -280,6 +285,8 @@  int i915_ppgtt_init_hw(struct drm_device *dev);
 void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
 					struct drm_i915_file_private *fpriv);
+void i915_ppgtt_destroy(struct i915_hw_ppgtt *ppgtt);
+
 static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
 {
 	if (ppgtt)