Message ID | 20171201103624.6565-7-l.stach@pengutronix.de (mailing list archive)
---|---
State | New, archived
On Fri, 2017-12-01 at 11:36 +0100, Lucas Stach wrote:
> All code paths which populate userptr BOs are fine with the get_pages
> function taking the mmap_sem lock. This allows to get rid of the pretty
> involved architecture with a worker being scheduled if the mmap_sem
> needs to be taken, but instead call GUP directly and allow it to take
> the lock if necessary.
>
> This simplifies the code a lot and removes the possibility of this
> function returning -EAGAIN, which complicates object population
> handling at the callers.
>
> Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
[...]
> -	if (etnaviv_obj->userptr.work) {
> -		if (IS_ERR(etnaviv_obj->userptr.work)) {
> -			ret = PTR_ERR(etnaviv_obj->userptr.work);
> -			etnaviv_obj->userptr.work = NULL;
> -		} else {
> -			ret = -EAGAIN;
> -		}
> -		return ret;
> -	}
> +	if (userptr->mm != current->mm)
> +		return -EPERM;

I don't pretend to understand the implications of this patch completely.
It looks mostly good to me, but this part seems to limit previously
allowed behaviour. I think this warrants a mention in the commit
message.

regards
Philipp
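For readers less familiar with the uapi side, the user-visible rule after this change is that only the process which created a userptr BO can later have its pages pinned. A minimal userspace sketch of the creation path, assuming the etnaviv uapi of this period (struct drm_etnaviv_gem_userptr with user_ptr, user_size, flags and handle fields, the ETNA_USERPTR_* flags, and libdrm's drmCommandWriteRead()); this is illustrative only, not code from the driver or from this thread:

```c
/*
 * Illustrative sketch, not from the patch. Header names and field layout
 * follow the etnaviv uapi as installed by libdrm at the time.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <etnaviv_drm.h>

static int create_userptr_bo(int fd, void *buf, uint64_t size, uint32_t *handle)
{
	struct drm_etnaviv_gem_userptr req;
	int ret;

	memset(&req, 0, sizeof(req));
	req.user_ptr = (uintptr_t)buf;	/* page aligned user memory */
	req.user_size = size;		/* multiple of the page size */
	req.flags = ETNA_USERPTR_READ | ETNA_USERPTR_WRITE;

	ret = drmCommandWriteRead(fd, DRM_ETNAVIV_GEM_USERPTR,
				  &req, sizeof(req));
	if (ret)
		return ret;

	*handle = req.handle;
	return 0;
}
```

Before the patch, a page-population request issued from a process other than the creator would have been deferred to the worker and resolved against the creator's mm; afterwards it fails with -EPERM, which is the behaviour change Philipp points out.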
On Fri, Dec 01, 2017 at 05:38:51PM +0100, Philipp Zabel wrote:
> On Fri, 2017-12-01 at 11:36 +0100, Lucas Stach wrote:
> > All code paths which populate userptr BOs are fine with the get_pages
> > function taking the mmap_sem lock. This allows to get rid of the pretty
> > involved architecture with a worker being scheduled if the mmap_sem
> > needs to be taken, but instead call GUP directly and allow it to take
> > the lock if necessary.
> >
> > This simplifies the code a lot and removes the possibility of this
> > function returning -EAGAIN, which complicates object population
> > handling at the callers.
> >
> > Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
[...]
> > +	if (userptr->mm != current->mm)
> > +		return -EPERM;
>
> I don't pretend to understand the implications of this patch completely.
> It looks mostly good to me, but this part seems to limit previously
> allowed behaviour. I think this warrants a mention in the commit
> message.

The point of the complexity is to be able to grab the mmap_sem while
avoiding any locking dependencies that would normally occur if it were
done in the ioctl.

However, drm locking has changed quite a bit over the last year, and
this is probably not needed anymore - I assume that Lucas has
thoroughly verified the locking dependencies. I do notice that i915
still retains this code, though.
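The locking hazard Russell describes is the classic AB-BA pattern: an ioctl path that already holds a driver lock and then takes mmap_sem inside GUP can deadlock against the fault/mmap path, which enters the driver with mmap_sem already held and then takes the driver lock. The sketch below is a plain userspace analogue (pthread mutexes standing in for mmap_sem and a per-object lock; it is not driver code) of the dependency that the old worker indirection was designed to avoid:

```c
/* Illustrative analogue only: lock_mmap stands in for mmap_sem, lock_obj
 * for a per-GEM-object mutex. The two threads acquire them in opposite
 * order, which is exactly the cycle lockdep would flag; the program may
 * therefore hang instead of printing. Build with -lpthread. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock_mmap = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_obj  = PTHREAD_MUTEX_INITIALIZER;

/* "ioctl path": object lock first, then mmap_sem via a direct GUP call */
static void *ioctl_path(void *arg)
{
	pthread_mutex_lock(&lock_obj);
	usleep(1000);			/* widen the race window */
	pthread_mutex_lock(&lock_mmap);
	pthread_mutex_unlock(&lock_mmap);
	pthread_mutex_unlock(&lock_obj);
	return NULL;
}

/* "fault path": enters the driver with mmap_sem held, then object lock */
static void *fault_path(void *arg)
{
	pthread_mutex_lock(&lock_mmap);
	usleep(1000);
	pthread_mutex_lock(&lock_obj);
	pthread_mutex_unlock(&lock_obj);
	pthread_mutex_unlock(&lock_mmap);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, ioctl_path, NULL);
	pthread_create(&b, NULL, fault_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("no deadlock this time\n");
	return 0;
}
```

The worker sidesteps the cycle because it takes mmap_sem without any driver lock held. Whether the cycle can actually occur for userptr objects depends on whether the same per-object lock is ever taken from the fault path; the separate lockdep class assigned to userptr objects in etnaviv_gem_new_userptr() (visible in the diff below) is what lets lockdep reason about that distinction.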
On Friday, 2017-12-01 at 16:51 +0000, Russell King - ARM Linux wrote:
> On Fri, Dec 01, 2017 at 05:38:51PM +0100, Philipp Zabel wrote:
> > On Fri, 2017-12-01 at 11:36 +0100, Lucas Stach wrote:
[...]
> > > +	if (userptr->mm != current->mm)
> > > +		return -EPERM;
> >
> > I don't pretend to understand the implications of this patch
> > completely. It looks mostly good to me, but this part seems to limit
> > previously allowed behaviour. I think this warrants a mention in the
> > commit message.
>
> The point of the complexity is to be able to grab the mmap_sem while
> avoiding any locking dependencies that would normally occur if it were
> done in the ioctl.
>
> However, drm locking has changed quite a bit over the last year, and
> this is probably not needed anymore - I assume that Lucas has
> thoroughly verified the locking dependencies. I do notice that i915
> still retains this code, though.

Yes, I have. Both by going through and reading a lot of the userptr
related core kernel code, and by improving lockdep's ability to reason
about the locking used inside etnaviv.

i915 still retains this code because it is still using the "big DRM
lock" struct_mutex. Etnaviv has a much more fine-grained locking scheme,
which allows us to drop this complexity.

Philipp is right that this patch also changes behaviour: it now rejects
attempts to populate userptr objects created by a foreign process. That
was allowed before, but would have been pretty broken in practice if
anyone had dared to use it, as it violates the coherency rules set by
the etnaviv GEM object handling.

Regards,
Lucas
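Part of that lockdep story is visible in the patch itself: the rewritten get_pages path announces the possible mmap_sem acquisition up front with might_lock_read(), so the dependency is recorded even on runs where the fast GUP path pins everything without ever blocking on the semaphore. A kernel-style fragment showing just that annotation pattern (a sketch under those assumptions, not the complete driver function):

```c
#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

/*
 * Sketch of the annotation idea only; the real function additionally
 * allocates the page array and loops over get_user_pages_fast() as in
 * the diff below.
 */
static int userptr_get_pages_sketch(struct mm_struct *mm)
{
	/*
	 * Tell lockdep that this path may take mmap_sem for reading, even
	 * if the fast path below never actually does. This turns a
	 * "sometimes" dependency into one lockdep always sees and checks
	 * against the locks currently held by the caller.
	 */
	might_lock_read(&mm->mmap_sem);

	/* ... get_user_pages_fast() based pinning goes here ... */
	return 0;
}
```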
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index a52220eeee45..fcc969fa0e69 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -705,141 +705,41 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
         return 0;
 }

-struct get_pages_work {
-        struct work_struct work;
-        struct mm_struct *mm;
-        struct task_struct *task;
-        struct etnaviv_gem_object *etnaviv_obj;
-};
-
-static struct page **etnaviv_gem_userptr_do_get_pages(
-        struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
-{
-        int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
-        struct page **pvec;
-        uintptr_t ptr;
-        unsigned int flags = 0;
-
-        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-        if (!pvec)
-                return ERR_PTR(-ENOMEM);
-
-        if (!etnaviv_obj->userptr.ro)
-                flags |= FOLL_WRITE;
-
-        pinned = 0;
-        ptr = etnaviv_obj->userptr.ptr;
-
-        down_read(&mm->mmap_sem);
-        while (pinned < npages) {
-                ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
-                                            flags, pvec + pinned, NULL, NULL);
-                if (ret < 0)
-                        break;
-
-                ptr += ret * PAGE_SIZE;
-                pinned += ret;
-        }
-        up_read(&mm->mmap_sem);
-
-        if (ret < 0) {
-                release_pages(pvec, pinned);
-                kvfree(pvec);
-                return ERR_PTR(ret);
-        }
-
-        return pvec;
-}
-
-static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
-{
-        struct get_pages_work *work = container_of(_work, typeof(*work), work);
-        struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
-        struct page **pvec;
-
-        pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);
-
-        mutex_lock(&etnaviv_obj->lock);
-        if (IS_ERR(pvec)) {
-                etnaviv_obj->userptr.work = ERR_CAST(pvec);
-        } else {
-                etnaviv_obj->userptr.work = NULL;
-                etnaviv_obj->pages = pvec;
-        }
-
-        mutex_unlock(&etnaviv_obj->lock);
-        drm_gem_object_put_unlocked(&etnaviv_obj->base);
-
-        mmput(work->mm);
-        put_task_struct(work->task);
-        kfree(work);
-}
-
 static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 {
         struct page **pvec = NULL;
-        struct get_pages_work *work;
-        struct mm_struct *mm;
-        int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
+        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
+        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

         might_lock_read(&current->mm->mmap_sem);

-        if (etnaviv_obj->userptr.work) {
-                if (IS_ERR(etnaviv_obj->userptr.work)) {
-                        ret = PTR_ERR(etnaviv_obj->userptr.work);
-                        etnaviv_obj->userptr.work = NULL;
-                } else {
-                        ret = -EAGAIN;
-                }
-                return ret;
-        }
+        if (userptr->mm != current->mm)
+                return -EPERM;

-        mm = get_task_mm(etnaviv_obj->userptr.task);
-        pinned = 0;
-        if (mm == current->mm) {
-                pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-                if (!pvec) {
-                        mmput(mm);
-                        return -ENOMEM;
-                }
+        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+        if (!pvec)
+                return -ENOMEM;
+
+        do {
+                unsigned num_pages = npages - pinned;
+                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
+                struct page **pages = pvec + pinned;

-                pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
-                                               !etnaviv_obj->userptr.ro, pvec);
-                if (pinned < 0) {
+                ret = get_user_pages_fast(ptr, num_pages,
+                                          !userptr->ro ? FOLL_WRITE : 0, pages);
+                if (ret < 0) {
+                        release_pages(pvec, pinned);
                         kvfree(pvec);
-                        mmput(mm);
-                        return pinned;
+                        return ret;
                 }
-                if (pinned == npages) {
-                        etnaviv_obj->pages = pvec;
-                        mmput(mm);
-                        return 0;
-                }
-        }
-
-        release_pages(pvec, pinned);
-        kvfree(pvec);
-
-        work = kmalloc(sizeof(*work), GFP_KERNEL);
-        if (!work) {
-                mmput(mm);
-                return -ENOMEM;
-        }
-
-        get_task_struct(current);
-        drm_gem_object_get(&etnaviv_obj->base);
-
-        work->mm = mm;
-        work->task = current;
-        work->etnaviv_obj = etnaviv_obj;
+                pinned += ret;

-        etnaviv_obj->userptr.work = &work->work;
-        INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);
+        } while (pinned < npages);

-        etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);
+        etnaviv_obj->pages = pvec;

-        return -EAGAIN;
+        return 0;
 }

 static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
@@ -855,7 +755,6 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
                 release_pages(etnaviv_obj->pages, npages);
                 kvfree(etnaviv_obj->pages);
         }
-        put_task_struct(etnaviv_obj->userptr.task);
 }

 static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
@@ -885,9 +784,8 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
         lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

         etnaviv_obj->userptr.ptr = ptr;
-        etnaviv_obj->userptr.task = current;
+        etnaviv_obj->userptr.mm = current->mm;
         etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
-        get_task_struct(current);

         etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 00bd9c851a02..d1a7d040ac97 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -26,8 +26,7 @@ struct etnaviv_gem_object;

 struct etnaviv_gem_userptr {
         uintptr_t ptr;
-        struct task_struct *task;
-        struct work_struct *work;
+        struct mm_struct *mm;
         bool ro;
 };
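One detail of the new loop that is easy to miss: get_user_pages_fast() returns the number of pages it actually pinned, which may be less than requested, so a single call is not enough for large ranges. The do/while above therefore advances the user address and the destination array by the amount pinned so far and calls GUP again until everything is pinned or an error comes back. The same pattern, reduced to a small kernel-style helper (a sketch; the names are local to this example and the exact get_user_pages_fast() prototype has varied across kernel versions, so treat the call as schematic):

```c
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch of the retry pattern used in the patch, not the driver function. */
static int pin_user_range(unsigned long start, int npages, bool write,
			  struct page **pvec)
{
	int pinned = 0;

	do {
		int ret = get_user_pages_fast(start + pinned * PAGE_SIZE,
					      npages - pinned,
					      write ? FOLL_WRITE : 0,
					      pvec + pinned);
		if (ret < 0) {
			/* undo the pages pinned by earlier iterations */
			release_pages(pvec, pinned);
			return ret;
		}
		pinned += ret;
	} while (pinned < npages);

	return 0;
}
```

As in the patch, the loop assumes GUP either makes forward progress or fails with a negative error; a return of zero would spin here.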
All code paths which populate userptr BOs are fine with the get_pages
function taking the mmap_sem lock. This allows to get rid of the pretty
involved architecture with a worker being scheduled if the mmap_sem
needs to be taken, but instead call GUP directly and allow it to take
the lock if necessary.

This simplifies the code a lot and removes the possibility of this
function returning -EAGAIN, which complicates object population
handling at the callers.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_gem.c | 146 +++++----------------------------
 drivers/gpu/drm/etnaviv/etnaviv_gem.h |   3 +-
 2 files changed, 23 insertions(+), 126 deletions(-)
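The "complicates object population handling at the callers" part refers to the shared page-population path: before this change, a caller of the userptr get_pages hook had to treat -EAGAIN as "population was handed off to a worker, back off and retry later", while treating every other error as final. A purely hypothetical before/after sketch of what that asymmetry costs a caller (the function names and the retry policy are invented for illustration and do not come from the driver):

```c
#include <linux/delay.h>
#include "etnaviv_gem.h"

/* Hypothetical illustration only; not the real etnaviv call sites. */

/* Before: -EAGAIN is not a failure but a "try again later" signal. */
static int populate_old(struct etnaviv_gem_object *obj)
{
	int ret;

	do {
		ret = obj->ops->get_pages(obj);
		if (ret == -EAGAIN) {
			/* would have to drop locks, wait for the worker,
			 * re-acquire and re-validate before retrying */
			msleep(1);
		}
	} while (ret == -EAGAIN);

	return ret;
}

/* After: get_pages() either succeeds or fails for good. */
static int populate_new(struct etnaviv_gem_object *obj)
{
	return obj->ops->get_pages(obj);
}
```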