| Message ID | f1e25df28917993930ffc5db596771fe6de0fa59.1531955948.git.hamohammed.sa@gmail.com (mailing list archive) |
|---|---|
| State | New, archived |
On Thu, Jul 19, 2018 at 03:18:03AM +0300, Haneen Mohammed wrote:
> This patch adds the necessary functions to map/unmap GEM
> backing memory to the kernel's virtual address space.
>
> Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
> ---
> Changes in v2:
> - add vkms_gem_vunmap
> - use vmap_count to guard against multiple prepare_fb calls on the same
>   fb
> Changes in v3:
> - change vkms_gem_vmap to return int
> - cleanup if vmap failed
> - increment vmap_count after successful vmap
>
>  drivers/gpu/drm/vkms/vkms_drv.h |  9 ++++
>  drivers/gpu/drm/vkms/vkms_gem.c | 80 ++++++++++++++++++++++++++++++++-
>  2 files changed, 88 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
> index 07be29f2dc44..47048f707566 100644
> --- a/drivers/gpu/drm/vkms/vkms_drv.h
> +++ b/drivers/gpu/drm/vkms/vkms_drv.h
> @@ -39,6 +39,8 @@ struct vkms_gem_object {
>  	struct drm_gem_object gem;
>  	struct mutex pages_lock; /* Page lock used in page fault handler */
>  	struct page **pages;
> +	unsigned int vmap_count;
> +	void *vaddr;
>  };
>
>  #define drm_crtc_to_vkms_output(target) \
> @@ -47,6 +49,9 @@ struct vkms_gem_object {
>  #define drm_device_to_vkms_device(target) \
>  	container_of(target, struct vkms_device, drm)
>
> +#define drm_gem_to_vkms_gem(target)\
> +	container_of(target, struct vkms_gem_object, gem)
> +
>  /* CRTC */
>  int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
>  		   struct drm_plane *primary, struct drm_plane *cursor);
> @@ -75,4 +80,8 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
>
>  void vkms_gem_free_object(struct drm_gem_object *obj);
>
> +int vkms_gem_vmap(struct drm_gem_object *obj);
> +
> +void vkms_gem_vunmap(struct drm_gem_object *obj);
> +
>  #endif /* _VKMS_DRV_H_ */
> diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
> index c7e38368602b..2e55c906d9b2 100644
> --- a/drivers/gpu/drm/vkms/vkms_gem.c
> +++ b/drivers/gpu/drm/vkms/vkms_gem.c
> @@ -37,7 +37,9 @@ void vkms_gem_free_object(struct drm_gem_object *obj)
>  	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
>  						   gem);
>
> -	kvfree(gem->pages);
> +	WARN_ON(gem->pages);
> +	WARN_ON(gem->vaddr);
> +
>  	mutex_destroy(&gem->pages_lock);
>  	drm_gem_object_release(obj);
>  	kfree(gem);
> @@ -177,3 +179,79 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
>
>  	return ret;
>  }
> +
> +static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
> +{
> +	struct drm_gem_object *gem_obj = &vkms_obj->gem;
> +
> +	if (!vkms_obj->pages) {
> +		struct page **pages = drm_gem_get_pages(gem_obj);
> +
> +		if (IS_ERR(pages))
> +			return pages;
> +
> +		if (cmpxchg(&vkms_obj->pages, NULL, pages))
> +			drm_gem_put_pages(gem_obj, pages, false, true);
> +	}
> +
> +	return vkms_obj->pages;
> +}
> +
> +void vkms_gem_vunmap(struct drm_gem_object *obj)
> +{
> +	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
> +
> +	mutex_lock(&vkms_obj->pages_lock);
> +	if (vkms_obj->vmap_count < 1) {
> +		WARN_ON(vkms_obj->vaddr);
> +		WARN_ON(vkms_obj->pages);
> +		mutex_unlock(&vkms_obj->pages_lock);
> +		return;
> +	}
> +
> +	vkms_obj->vmap_count--;
> +
> +	if (vkms_obj->vmap_count == 0) {
> +		vunmap(vkms_obj->vaddr);
> +		vkms_obj->vaddr = NULL;
> +		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
> +		vkms_obj->pages = NULL;
> +	}
> +
> +	mutex_unlock(&vkms_obj->pages_lock);
> +}
> +
> +int vkms_gem_vmap(struct drm_gem_object *obj)
> +{
> +	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
> +	int ret = 0;
> +
> +	mutex_lock(&vkms_obj->pages_lock);
> +
> +	if (!vkms_obj->vaddr) {
> +		unsigned int n_pages = obj->size >> PAGE_SHIFT;
> +		struct page **pages = _get_pages(vkms_obj);
> +
> +		if (IS_ERR(pages)) {
> +			ret = PTR_ERR(pages);
> +			goto fail;
> +		}
> +
> +		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
> +		if (!vkms_obj->vaddr) {
> +			ret = -ENOMEM;
> +			drm_gem_put_pages(obj, vkms_obj->pages, false, true);
> +			vkms_obj->pages = NULL;
> +			goto fail;
> +		}
> +
> +		vkms_obj->vmap_count++;
> +	}
> +
> +	mutex_unlock(&vkms_obj->pages_lock);
> +	return 0;
> +
> +fail:

Since fail does the same as the success path, there are 2 ways of
handling this that would be (IMO) better:

1- Rename fail to out and remove the redundant 2 lines above
2- Add a new label above fail called err_vmap and do the
   ret/put_pages/pages=NULL cleanup there, dropping into the unlock and
   return after.

With either of these changes,

Reviewed-by: Sean Paul <seanpaul@chromium.org>

Sean

> +	mutex_unlock(&vkms_obj->pages_lock);
> +	return ret;
> +}
> --
> 2.17.1
>
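For concreteness, here is a minimal sketch of Sean's first suggestion: rename `fail` to `out`, drop the now-redundant `mutex_unlock()`/`return 0` pair, and let the success path (where `ret` is still 0) fall through into the shared unlock-and-return. This is an illustration of the review comment, not code from the posted patch:

```c
int vkms_gem_vmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	int ret = 0;

	mutex_lock(&vkms_obj->pages_lock);

	if (!vkms_obj->vaddr) {
		unsigned int n_pages = obj->size >> PAGE_SHIFT;
		struct page **pages = _get_pages(vkms_obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
		if (!vkms_obj->vaddr) {
			ret = -ENOMEM;
			drm_gem_put_pages(obj, vkms_obj->pages, false, true);
			vkms_obj->pages = NULL;
			goto out;
		}

		vkms_obj->vmap_count++;
	}

	/* Success falls through with ret == 0. */
out:
	mutex_unlock(&vkms_obj->pages_lock);
	return ret;
}
```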
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 07be29f2dc44..47048f707566 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -39,6 +39,8 @@ struct vkms_gem_object {
 	struct drm_gem_object gem;
 	struct mutex pages_lock; /* Page lock used in page fault handler */
 	struct page **pages;
+	unsigned int vmap_count;
+	void *vaddr;
 };
 
 #define drm_crtc_to_vkms_output(target) \
@@ -47,6 +49,9 @@ struct vkms_gem_object {
 #define drm_device_to_vkms_device(target) \
 	container_of(target, struct vkms_device, drm)
 
+#define drm_gem_to_vkms_gem(target)\
+	container_of(target, struct vkms_gem_object, gem)
+
 /* CRTC */
 int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 		   struct drm_plane *primary, struct drm_plane *cursor);
@@ -75,4 +80,8 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
 
 void vkms_gem_free_object(struct drm_gem_object *obj);
 
+int vkms_gem_vmap(struct drm_gem_object *obj);
+
+void vkms_gem_vunmap(struct drm_gem_object *obj);
+
 #endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index c7e38368602b..2e55c906d9b2 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -37,7 +37,9 @@ void vkms_gem_free_object(struct drm_gem_object *obj)
 	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
 						   gem);
 
-	kvfree(gem->pages);
+	WARN_ON(gem->pages);
+	WARN_ON(gem->vaddr);
+
 	mutex_destroy(&gem->pages_lock);
 	drm_gem_object_release(obj);
 	kfree(gem);
@@ -177,3 +179,79 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
 
 	return ret;
 }
+
+static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
+{
+	struct drm_gem_object *gem_obj = &vkms_obj->gem;
+
+	if (!vkms_obj->pages) {
+		struct page **pages = drm_gem_get_pages(gem_obj);
+
+		if (IS_ERR(pages))
+			return pages;
+
+		if (cmpxchg(&vkms_obj->pages, NULL, pages))
+			drm_gem_put_pages(gem_obj, pages, false, true);
+	}
+
+	return vkms_obj->pages;
+}
+
+void vkms_gem_vunmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+
+	mutex_lock(&vkms_obj->pages_lock);
+	if (vkms_obj->vmap_count < 1) {
+		WARN_ON(vkms_obj->vaddr);
+		WARN_ON(vkms_obj->pages);
+		mutex_unlock(&vkms_obj->pages_lock);
+		return;
+	}
+
+	vkms_obj->vmap_count--;
+
+	if (vkms_obj->vmap_count == 0) {
+		vunmap(vkms_obj->vaddr);
+		vkms_obj->vaddr = NULL;
+		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+		vkms_obj->pages = NULL;
+	}
+
+	mutex_unlock(&vkms_obj->pages_lock);
+}
+
+int vkms_gem_vmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+	int ret = 0;
+
+	mutex_lock(&vkms_obj->pages_lock);
+
+	if (!vkms_obj->vaddr) {
+		unsigned int n_pages = obj->size >> PAGE_SHIFT;
+		struct page **pages = _get_pages(vkms_obj);
+
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto fail;
+		}
+
+		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+		if (!vkms_obj->vaddr) {
+			ret = -ENOMEM;
+			drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+			vkms_obj->pages = NULL;
+			goto fail;
+		}
+
+		vkms_obj->vmap_count++;
+	}
+
+	mutex_unlock(&vkms_obj->pages_lock);
+	return 0;
+
+fail:
+	mutex_unlock(&vkms_obj->pages_lock);
+	return ret;
+}
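A note on `_get_pages()` above: the pages array is populated without holding a lock across the allocation, so two racing callers may each call drm_gem_get_pages(); exactly one cmpxchg() succeeds in publishing its array, and the loser releases its duplicate. A condensed, self-contained C11 userspace analog of that publish-or-release pattern (the `lazy_buf` names are illustrative, not from the kernel):

```c
#include <stdatomic.h>
#include <stdlib.h>

struct lazy_buf {
	_Atomic(void *) ptr;	/* lazily populated, published once */
};

static void *lazy_buf_get(struct lazy_buf *b, size_t size)
{
	void *cur = atomic_load(&b->ptr);

	if (!cur) {
		void *mine = calloc(1, size);
		void *expected = NULL;

		if (!mine)
			return NULL;

		/* Publish-or-release: exactly one racer wins the exchange. */
		if (!atomic_compare_exchange_strong(&b->ptr, &expected, mine))
			free(mine);	/* lost the race; drop the duplicate */

		cur = atomic_load(&b->ptr);
	}

	return cur;
}
```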
This patch adds the necessary functions to map/unmap GEM backing memory
to the kernel's virtual address space.

Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
---
Changes in v2:
- add vkms_gem_vunmap
- use vmap_count to guard against multiple prepare_fb calls on the same
  fb
Changes in v3:
- change vkms_gem_vmap to return int
- cleanup if vmap failed
- increment vmap_count after successful vmap

 drivers/gpu/drm/vkms/vkms_drv.h |  9 ++++
 drivers/gpu/drm/vkms/vkms_gem.c | 80 ++++++++++++++++++++++++++++++++-
 2 files changed, 88 insertions(+), 1 deletion(-)
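Since vmap_count is a refcount, every successful vkms_gem_vmap() must be balanced by a vkms_gem_vunmap(); the last vunmap tears down the mapping and releases the pages. A hypothetical caller, sketching how the pair would sit in the prepare_fb/cleanup_fb plane hooks the changelog refers to (the hook bodies and the drm_gem_fb_get_obj() usage are assumptions for illustration, not the actual VKMS plane code):

```c
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>

/* Hypothetical hooks; not the actual vkms_plane.c implementation. */
static int vkms_prepare_fb(struct drm_plane *plane,
			   struct drm_plane_state *state)
{
	struct drm_gem_object *gem_obj;

	if (!state->fb)
		return 0;

	/* Map the backing pages; vmap_count makes repeated calls safe. */
	gem_obj = drm_gem_fb_get_obj(state->fb, 0);
	return vkms_gem_vmap(gem_obj);
}

static void vkms_cleanup_fb(struct drm_plane *plane,
			    struct drm_plane_state *old_state)
{
	struct drm_gem_object *gem_obj;

	if (!old_state->fb)
		return;

	/* Drop our reference; the last vunmap frees vaddr and pages. */
	gem_obj = drm_gem_fb_get_obj(old_state->fb, 0);
	vkms_gem_vunmap(gem_obj);
}
```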