Message ID | d673210355d3c5660fd8cdb7ab7ef2def7e1976c.1531149873.git.hamohammed.sa@gmail.com (mailing list archive)
State      | New, archived
On Mon, Jul 09, 2018 at 06:44:26PM +0300, Haneen Mohammed wrote:
> This patch add the necessary functions to map GEM
> backing memory into the kernel's virtual address space.
>
> Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
> ---
> drivers/gpu/drm/vkms/vkms_drv.c | 2 ++
> drivers/gpu/drm/vkms/vkms_drv.h | 5 ++++
> drivers/gpu/drm/vkms/vkms_gem.c | 50 +++++++++++++++++++++++++++++++++
> 3 files changed, 57 insertions(+)
>
> diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
> index fe93f8c17997..e48c8eeb495a 100644
> --- a/drivers/gpu/drm/vkms/vkms_drv.c
> +++ b/drivers/gpu/drm/vkms/vkms_drv.c
> @@ -52,9 +52,11 @@ static struct drm_driver vkms_driver = {
> .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
> .release = vkms_release,
> .fops = &vkms_driver_fops,
> +
> .dumb_create = vkms_dumb_create,
> .dumb_map_offset = vkms_dumb_map,
> .gem_vm_ops = &vkms_gem_vm_ops,
> + .gem_free_object_unlocked = vkms_gem_free_object,

This is a separate bugfix, fixing a fairly huge memory leak. I think
it'd be good to catch this in the future, by adding an
else WARN(1, "no gem free callback, leaking memory\n"); to the end of
drm_gem_object_free. Can you pls do that?

Also since this line here is a bugfix separate from the vmap support,
can you pls split it out?

>
> .name = DRIVER_NAME,
> .desc = DRIVER_DESC,
> diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
> index 76f1720f81a5..d339a8108d85 100644
> --- a/drivers/gpu/drm/vkms/vkms_drv.h
> +++ b/drivers/gpu/drm/vkms/vkms_drv.h
> @@ -35,6 +35,7 @@ struct vkms_gem_object {
> struct drm_gem_object gem;
> struct mutex pages_lock; /* Page lock used in page fault handler */
> struct page **pages;
> + void *vaddr;
> };
>
> int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
> @@ -58,4 +59,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
> int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
> u32 handle, u64 *offset);
>
> +void vkms_gem_free_object(struct drm_gem_object *obj);
> +
> +void *vkms_gem_vmap(struct drm_gem_object *obj);
> +
> #endif /* _VKMS_DRV_H_ */
> diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
> index 9f820f56b9e0..249855dded63 100644
> --- a/drivers/gpu/drm/vkms/vkms_gem.c
> +++ b/drivers/gpu/drm/vkms/vkms_gem.c
> @@ -166,3 +166,53 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
>
> return ret;
> }
> +
> +void vkms_gem_free_object(struct drm_gem_object *obj)
> +{
> + struct vkms_gem_object *vkms_obj = container_of(obj,
> + struct vkms_gem_object,
> + gem);
> + kvfree(vkms_obj->pages);

We need to put the pages here too if ->pages exists, there's a
put_pages helper for that. Also probably a good idea to vunmap here
too.

But I think both cases (i.e. vmap not yet released and pages still
around) would indicate a bug in the vkms driver of releasing the object
while it's still in use somewhere. So maybe also add a
WARN_ON(obj->pages); WARN_ON(obj->vmap); here to make sure this doesn't
happen?
-Daniel

> + mutex_destroy(&vkms_obj->pages_lock);
> + drm_gem_object_release(obj);
> + kfree(vkms_obj);
> +}
> +
> +struct page **get_pages(struct vkms_gem_object *vkms_obj)
> +{
> + struct drm_gem_object *gem_obj = &vkms_obj->gem;
> + struct page **pages = vkms_obj->pages;
> +
> + if (!pages) {
> + mutex_lock(&vkms_obj->pages_lock);
> + pages = drm_gem_get_pages(gem_obj);
> + if (IS_ERR(pages)) {
> + mutex_unlock(&vkms_obj->pages_lock);
> + return pages;
> + }
> +
> + vkms_obj->pages = pages;
> + mutex_unlock(&vkms_obj->pages_lock);
> + }
> +
> + return pages;
> +}
> +
> +void *vkms_gem_vmap(struct drm_gem_object *obj)
> +{
> + void *vaddr = NULL;
> + struct vkms_gem_object *vkms_obj = container_of(obj,
> + struct vkms_gem_object,
> + gem);
> + unsigned int n_pages = obj->size >> PAGE_SHIFT;
> + struct page **pages = get_pages(vkms_obj);
> +
> + if (IS_ERR(pages)) {
> + DRM_INFO("pages allocation failed %ld\n", PTR_ERR(pages));
> + return vaddr;
> + }
> +
> + vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
> +
> + return vaddr;
> +}
> --
> 2.17.1
>
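For reference, a rough and untested sketch of a free path that folds in
the comments above could look like the following. It assumes the driver
records the mapping in the new vaddr field, and it uses
drm_gem_put_pages() as the put_pages helper mentioned above (which also
frees the page array, so no separate kvfree() is needed); the exact
shape of the final code is of course up to the patch author:

        void vkms_gem_free_object(struct drm_gem_object *obj)
        {
                struct vkms_gem_object *vkms_obj = container_of(obj,
                                        struct vkms_gem_object, gem);

                /* Both should normally be gone by now; anything left
                 * over hints at the object being freed while still in
                 * use somewhere.
                 */
                WARN_ON(vkms_obj->vaddr);
                WARN_ON(vkms_obj->pages);

                if (vkms_obj->vaddr)
                        vunmap(vkms_obj->vaddr);

                if (vkms_obj->pages)
                        /* Unpins the pages and frees the page array. */
                        drm_gem_put_pages(obj, vkms_obj->pages, false, false);

                mutex_destroy(&vkms_obj->pages_lock);
                drm_gem_object_release(obj);
                kfree(vkms_obj);
        }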
Quoting Haneen Mohammed (2018-07-09 16:44:26)
> +struct page **get_pages(struct vkms_gem_object *vkms_obj)
> +{
> + struct drm_gem_object *gem_obj = &vkms_obj->gem;
> + struct page **pages = vkms_obj->pages;
> +
> + if (!pages) {
> + mutex_lock(&vkms_obj->pages_lock);
> + pages = drm_gem_get_pages(gem_obj);
> + if (IS_ERR(pages)) {
> + mutex_unlock(&vkms_obj->pages_lock);
> + return pages;
> + }
> +
> + vkms_obj->pages = pages;
> + mutex_unlock(&vkms_obj->pages_lock);

You have a race here with two callers setting pages. Trivially you fix
it by checking if (!pages) again inside the lock, but the lock is
superfluous in this case:

        if (!vkms_obj->pages) {
                struct page **pages;

                pages = drm_gem_get_pages(gem_obj);
                if (IS_ERR(pages))
                        return pages;

                if (cmpxchg(&vkms_obj->pages, NULL, pages))
                        put_pages(pages);

                /* barrier() is implied */
        }

        return vkms_obj->pages;
-Chris
On Tue, Jul 10, 2018 at 09:12:36AM +0100, Chris Wilson wrote:
> Quoting Haneen Mohammed (2018-07-09 16:44:26)
> > +struct page **get_pages(struct vkms_gem_object *vkms_obj)
> > +{
> > + struct drm_gem_object *gem_obj = &vkms_obj->gem;
> > + struct page **pages = vkms_obj->pages;
> > +
> > + if (!pages) {
> > + mutex_lock(&vkms_obj->pages_lock);
> > + pages = drm_gem_get_pages(gem_obj);
> > + if (IS_ERR(pages)) {
> > + mutex_unlock(&vkms_obj->pages_lock);
> > + return pages;
> > + }
> > +
> > + vkms_obj->pages = pages;
> > + mutex_unlock(&vkms_obj->pages_lock);
>
> You have a race here with two callers setting pages. Trivially you fix
> it by checking if (!pages) again inside the lock, but the lock is
> superfluous in this case:
>
>         if (!vkms_obj->pages) {
>                 struct page **pages;
>
>                 pages = drm_gem_get_pages(gem_obj);
>                 if (IS_ERR(pages))
>                         return pages;
>
>                 if (cmpxchg(&vkms_obj->pages, NULL, pages))
>                         put_pages(pages);
>
>                 /* barrier() is implied */
>         }
>
>         return vkms_obj->pages;
> -Chris

Thank you for the feedback!

- Haneen
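Written out as a complete function, the lock-free variant Chris
sketches above could look roughly like this (hypothetical name,
untested; drm_gem_put_pages() stands in for the put_pages step, and
cmpxchg() ensures only one caller installs its freshly pinned array):

        static struct page **vkms_gem_get_pages(struct vkms_gem_object *vkms_obj)
        {
                struct drm_gem_object *gem_obj = &vkms_obj->gem;

                if (!vkms_obj->pages) {
                        struct page **pages;

                        pages = drm_gem_get_pages(gem_obj);
                        if (IS_ERR(pages))
                                return pages;

                        /* Whoever loses the cmpxchg drops its pages again. */
                        if (cmpxchg(&vkms_obj->pages, NULL, pages))
                                drm_gem_put_pages(gem_obj, pages, false, false);
                }

                return vkms_obj->pages;
        }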
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index fe93f8c17997..e48c8eeb495a 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -52,9 +52,11 @@ static struct drm_driver vkms_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
 	.release = vkms_release,
 	.fops = &vkms_driver_fops,
+
 	.dumb_create = vkms_dumb_create,
 	.dumb_map_offset = vkms_dumb_map,
 	.gem_vm_ops = &vkms_gem_vm_ops,
+	.gem_free_object_unlocked = vkms_gem_free_object,
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 76f1720f81a5..d339a8108d85 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -35,6 +35,7 @@ struct vkms_gem_object {
 	struct drm_gem_object gem;
 	struct mutex pages_lock; /* Page lock used in page fault handler */
 	struct page **pages;
+	void *vaddr;
 };
 
 int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
@@ -58,4 +59,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
 int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
 		  u32 handle, u64 *offset);
 
+void vkms_gem_free_object(struct drm_gem_object *obj);
+
+void *vkms_gem_vmap(struct drm_gem_object *obj);
+
 #endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 9f820f56b9e0..249855dded63 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -166,3 +166,53 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
 
 	return ret;
 }
+
+void vkms_gem_free_object(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = container_of(obj,
+						struct vkms_gem_object,
+						gem);
+	kvfree(vkms_obj->pages);
+	mutex_destroy(&vkms_obj->pages_lock);
+	drm_gem_object_release(obj);
+	kfree(vkms_obj);
+}
+
+struct page **get_pages(struct vkms_gem_object *vkms_obj)
+{
+	struct drm_gem_object *gem_obj = &vkms_obj->gem;
+	struct page **pages = vkms_obj->pages;
+
+	if (!pages) {
+		mutex_lock(&vkms_obj->pages_lock);
+		pages = drm_gem_get_pages(gem_obj);
+		if (IS_ERR(pages)) {
+			mutex_unlock(&vkms_obj->pages_lock);
+			return pages;
+		}
+
+		vkms_obj->pages = pages;
+		mutex_unlock(&vkms_obj->pages_lock);
+	}
+
+	return pages;
+}
+
+void *vkms_gem_vmap(struct drm_gem_object *obj)
+{
+	void *vaddr = NULL;
+	struct vkms_gem_object *vkms_obj = container_of(obj,
+						struct vkms_gem_object,
+						gem);
+	unsigned int n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages = get_pages(vkms_obj);
+
+	if (IS_ERR(pages)) {
+		DRM_INFO("pages allocation failed %ld\n", PTR_ERR(pages));
+		return vaddr;
+	}
+
+	vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+
+	return vaddr;
+}
This patch adds the necessary functions to map GEM backing memory into
the kernel's virtual address space.

Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
---
 drivers/gpu/drm/vkms/vkms_drv.c |  2 ++
 drivers/gpu/drm/vkms/vkms_drv.h |  5 ++++
 drivers/gpu/drm/vkms/vkms_gem.c | 50 +++++++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+)
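For context, a hypothetical in-kernel user of the new helper would look
roughly like the snippet below: vkms_gem_vmap() hands back a kernel
virtual address (or NULL on failure, since vaddr starts out NULL), and
the mapping is torn down again with vunmap(). The function name is made
up purely for illustration:

        /* Hypothetical caller, for illustration only. */
        static int vkms_clear_buffer(struct drm_gem_object *gem_obj)
        {
                void *vaddr = vkms_gem_vmap(gem_obj);

                if (!vaddr)
                        return -ENOMEM;

                memset(vaddr, 0, gem_obj->size);
                vunmap(vaddr);

                return 0;
        }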