Message ID | 1404164773-14759-1-git-send-email-j.glisse@gmail.com (mailing list archive) |
---|---|
State | New, archived |
Hi Jerome,

On 2014년 07월 01일 06:46, j.glisse@gmail.com wrote:
> From: Jerome Glisse <jglisse@redhat.com>
>
> get_user_pages gives no guarantee that the pages it returns are still
> the ones backing the vma by the time it returns. Thus any ioctl

Could one of the pages from get_user_pages() be migrated? I think all the
pages are pinned so that they cannot be migrated. If that is not the case,
is there another case I missed?

One more thing: do you think this issue cannot be resolved, so you are
trying to remove the userptr feature from the g2d driver?

Thanks,
Inki Dae

> that relies on this behavior is broken and relies on pure luck. To
> avoid any false hope from userspace, stop such usage by simply
> flat out returning -EFAULT. Better to have reliable behavior
> than to depend on pure luck and the currently observed behavior of
> mm code.
>
> Note this was not even compile tested, but I think I did update
> all places.
>
> Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
> ---
>  drivers/gpu/drm/exynos/exynos_drm_drv.h |   1 -
>  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 277 +-------------------------------
>  drivers/gpu/drm/exynos/exynos_drm_gem.c |  60 -------
>  drivers/gpu/drm/exynos/exynos_drm_gem.h |  20 ---
>  4 files changed, 3 insertions(+), 355 deletions(-)
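As an aside for readers following the pinning question: below is a minimal, hedged sketch (not code from the driver, helper names invented) of the get_user_pages() pattern of this kernel generation, using the same write/force flags the g2d code quoted in the patch passes. The reference taken on each page keeps the physical page from being freed or migrated while it is pinned, but nothing keeps the user's virtual mapping pointing at that page afterwards, and the pin is not charged to any mlock- or memcg-style limit.

```c
/* Sketch only: era-specific GUP usage; prototypes differ in later kernels. */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pagemap.h>

static long sketch_pin_user_range(unsigned long start, unsigned long npages,
				  struct page **pages)
{
	long got;

	down_read(&current->mm->mmap_sem);
	/* write = 1, force = 1, as in the g2d userptr path */
	got = get_user_pages(current, current->mm, start, npages,
			     1, 1, pages, NULL);
	up_read(&current->mm->mmap_sem);

	return got;	/* may be short; caller must unpin what it got */
}

static void sketch_unpin_user_range(struct page **pages, long npages)
{
	long i;

	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(pages[i]);	/* device may have written */
		put_page(pages[i]);		/* drop the GUP reference */
	}
}
```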
On Tue, Jul 01, 2014 at 05:55:25PM +0900, Inki Dae wrote:
> Hi Jerome,
>
> On 2014년 07월 01일 06:46, j.glisse@gmail.com wrote:
> > From: Jerome Glisse <jglisse@redhat.com>
> >
> > get_user_pages gives no guarantee that the pages it returns are still
> > the ones backing the vma by the time it returns. Thus any ioctl
>
> Could one of the pages from get_user_pages() be migrated? I think all the
> pages are pinned so that they cannot be migrated. If that is not the case,
> is there another case I missed?

I thought the ksm or gdb paths were forcing an unmap and replace, but they
still abide by the pin, so while the page will not be replaced you are still
violating a few things. First, you are pinning memory without any kind of
accounting from the memcg point of view, hence userspace can use that as an
attack vector to deplete memory.

You also ignore any munmap that might happen; munmap does not care about a
page being pinned or not, and the vma will disappear.

Nor do you respect a page being remapped read-only for page writeback,
allowing the gpu to write to the page while I/O is in progress.

> One more thing: do you think this issue cannot be resolved, so you are
> trying to remove the userptr feature from the g2d driver?

I am just thinking that this is a somewhat evil api. I understand how nice
it is for things like zero-copy upload/download, but it allows going around
some of the memory policy set by other parts of the kernel.

But I guess that, given you map/unmap across the cmd ioctl, it is close to
the direct-I/O usage pattern and thus can be forgiven.

Note that if you were to start using GUP-fast then you will also have to
consider mmu_notifier range invalidation.

Cheers,
Jérôme
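To make Jérôme's last point concrete, here is a hedged sketch of the mmu_notifier pattern he refers to (the approach i915 userptr adopted): a notifier whose invalidate_range_start callback forces the device to stop using the old pages whenever the CPU page tables covering the pinned range are about to change (munmap, migration, ksm, writeback protection, ...). The struct and helper names are hypothetical, and the callback signature shown matches kernels of this era; later kernels changed it.

```c
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

struct sketch_userptr {
	struct mmu_notifier	mn;
	unsigned long		start;
	unsigned long		size;
};

/* Hypothetical helper: wait for / cancel DMA that still targets the range. */
static void sketch_userptr_cancel_dma(struct sketch_userptr *p,
				      unsigned long start, unsigned long end)
{
	/* e.g. wait for the engine to idle, then unmap the pages */
}

static void sketch_invalidate_range_start(struct mmu_notifier *mn,
					  struct mm_struct *mm,
					  unsigned long start,
					  unsigned long end)
{
	struct sketch_userptr *p = container_of(mn, struct sketch_userptr, mn);

	/* Ignore invalidations that do not overlap our pinned range. */
	if (end <= p->start || start >= p->start + p->size)
		return;

	/* The old pages may be going away: the device must let go of them. */
	sketch_userptr_cancel_dma(p, start, end);
}

static const struct mmu_notifier_ops sketch_mn_ops = {
	.invalidate_range_start	= sketch_invalidate_range_start,
};

static int sketch_userptr_track(struct sketch_userptr *p, struct mm_struct *mm)
{
	p->mn.ops = &sketch_mn_ops;
	return mmu_notifier_register(&p->mn, mm);
}
```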
2014-07-02 0:03 GMT+09:00 Jerome Glisse <j.glisse@gmail.com>:
> On Tue, Jul 01, 2014 at 05:55:25PM +0900, Inki Dae wrote:
>> Could one of the pages from get_user_pages() be migrated? I think all the
>> pages are pinned so that they cannot be migrated. If that is not the case,
>> is there another case I missed?
>
> I thought the ksm or gdb paths were forcing an unmap and replace, but they
> still abide by the pin, so while the page will not be replaced you are still
> violating a few things. First, you are pinning memory without any kind of
> accounting from the memcg point of view, hence userspace can use that as an
> attack vector to deplete memory.

Hm... right. The attack vector could deplete system memory because g2d pins
all the pages it gets from get_user_pages(). But not every process can do it:
the g2d ioctls can only be used by a process authorized by the master or by a
root process. Of course, if a stricter rule is required, we can limit the
size of memory that may be used by g2d.

> You also ignore any munmap that might happen; munmap does not care about a
> page being pinned or not, and the vma will disappear.

The vma belongs to userspace. The physical pages will be freed once the g2d
dma releases them. The important thing is that the pages from
get_user_pages() are only used while the g2d dma is running, so once the dma
operation is completed the pages will be freed. Also, the g2d driver doesn't
provide any interface through which a user process could access those pages.
Can you give me more details on what the problem is?

> Nor do you respect a page being remapped read-only for page writeback,
> allowing the gpu to write to the page while I/O is in progress.

You mean that it is possible for the g2d dma to access the pages for write
while an I/O operation is in progress, at which moment the pages have the
read-only attribute?

>> One more thing: do you think this issue cannot be resolved, so you are
>> trying to remove the userptr feature from the g2d driver?
>
> I am just thinking that this is a somewhat evil api. I understand how nice
> it is for things like zero-copy upload/download, but it allows going around
> some of the memory policy set by other parts of the kernel.
>
> But I guess that, given you map/unmap across the cmd ioctl, it is close to
> the direct-I/O usage pattern and thus can be forgiven.
>
> Note that if you were to start using GUP-fast then you will also have to
> consider mmu_notifier range invalidation.

s/GUP-fast/GPU-fast.

Hm.. I think the two page tables, one for user space and the other for device
space, are separate from each other: the cpu and the dma use fully separated
page tables in the case of Exynos drm. So I don't understand why the g2d
driver has to consider mmu_notifier or related things. If there is a point I
am missing, can you give me more details?

Thanks,
Inki Dae
On Thu, Jul 03, 2014 at 12:09:25AM +0900, Inki Dae wrote:
> 2014-07-02 0:03 GMT+09:00 Jerome Glisse <j.glisse@gmail.com>:
> > I thought the ksm or gdb paths were forcing an unmap and replace, but they
> > still abide by the pin, so while the page will not be replaced you are
> > still violating a few things. First, you are pinning memory without any
> > kind of accounting from the memcg point of view, hence userspace can use
> > that as an attack vector to deplete memory.
>
> Hm... right. The attack vector could deplete system memory because g2d pins
> all the pages it gets from get_user_pages(). But not every process can do
> it: the g2d ioctls can only be used by a process authorized by the master or
> by a root process. Of course, if a stricter rule is required, we can limit
> the size of memory that may be used by g2d.

Well, I just wanted to stress that pinning pages that are on the lru does not
have the same effect, from the mm point of view, as pinning pages that are
part of a regular gem object for the gpu.

> > You also ignore any munmap that might happen; munmap does not care about a
> > page being pinned or not, and the vma will disappear.
>
> The vma belongs to userspace. The physical pages will be freed once the g2d
> dma releases them. The important thing is that the pages from
> get_user_pages() are only used while the g2d dma is running, so once the dma
> operation is completed the pages will be freed. Also, the g2d driver doesn't
> provide any interface through which a user process could access those pages.
> Can you give me more details on what the problem is?
>
> > Nor do you respect a page being remapped read-only for page writeback,
> > allowing the gpu to write to the page while I/O is in progress.
>
> You mean that it is possible for the g2d dma to access the pages for write
> while an I/O operation is in progress, at which moment the pages have the
> read-only attribute?
>
> > Note that if you were to start using GUP-fast then you will also have to
> > consider mmu_notifier range invalidation.
>
> s/GUP-fast/GPU-fast.
>
> Hm.. I think the two page tables, one for user space and the other for
> device space, are separate from each other: the cpu and the dma use fully
> separated page tables in the case of Exynos drm. So I don't understand why
> the g2d driver has to consider mmu_notifier or related things. If there is a
> point I am missing, can you give me more details?

I was talking about get_user_pages_fast, for which you have no guarantee of
exclusion against fork, truncate, munmap, ...

Anyway, as this is upstream I guess you can keep it. This is just a horrible
API that allows circumventing any limit set by memcg for page locking and
all. But anyway, GPU drivers never played in the same ballpark as other
drivers.

Cheers,
Jérôme
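As a hedged illustration of the "circumventing limits" concern: other long-term GUP users in the kernel (the infiniband umem code, for instance) charge pinned pages against RLIMIT_MEMLOCK before pinning anything, roughly as sketched below. This addresses the mlock-style limit Daniel mentions later rather than memcg proper; the helper names are made up and the mm_struct field shown (pinned_vm) matches kernels of this era.

```c
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/capability.h>

/* Charge npages against the caller's RLIMIT_MEMLOCK before pinning. */
static int sketch_account_pinned(struct mm_struct *mm, unsigned long npages)
{
	unsigned long locked, lock_limit;

	down_write(&mm->mmap_sem);
	locked = npages + mm->pinned_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		up_write(&mm->mmap_sem);
		return -ENOMEM;
	}
	mm->pinned_vm = locked;
	up_write(&mm->mmap_sem);

	return 0;
}

/* Undo the charge once the pages have been released with put_page(). */
static void sketch_unaccount_pinned(struct mm_struct *mm, unsigned long npages)
{
	down_write(&mm->mmap_sem);
	mm->pinned_vm -= npages;
	up_write(&mm->mmap_sem);
}
```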
On Wed, Jul 02, 2014 at 11:25:19AM -0400, Jerome Glisse wrote:
> Anyway, as this is upstream I guess you can keep it. This is just a horrible
> API that allows circumventing any limit set by memcg for page locking and
> all. But anyway, GPU drivers never played in the same ballpark as other
> drivers.

I agree that exynos userptr as-is should be removed, since as opposed to the
i915 implementation it doesn't play nice with the core mm and allows
unprivileged userspace to exceed all mlock limits.

I've taken considerable amounts of internal heat for rejecting i915 userptr
until the mmu notifier stuff was implemented, and so really want exynos to
fix this up asap.

Dave?

Thanks, Daniel
2014-07-08 22:37 GMT+09:00 Daniel Vetter <daniel@ffwll.ch>:
> On Wed, Jul 02, 2014 at 11:25:19AM -0400, Jerome Glisse wrote:
>> Anyway, as this is upstream I guess you can keep it. This is just a
>> horrible API that allows circumventing any limit set by memcg for page
>> locking and all. But anyway, GPU drivers never played in the same ballpark
>> as other drivers.
>
> I agree that exynos userptr as-is should be removed, since as opposed to the
> i915 implementation it doesn't play nice with the core mm

Can you give me more details on why you think so?

> and allows unprivileged userspace to exceed all mlock limits.

We can make the userptr ioctl require master permission in the meantime.

Thanks,
Inki Dae

> I've taken considerable amounts of internal heat for rejecting i915 userptr
> until the mmu notifier stuff was implemented, and so really want exynos to
> fix this up asap.
>
> Dave?
>
> Thanks, Daniel
On Tue, Jul 8, 2014 at 6:20 PM, Inki Dae <inki.dae@samsung.com> wrote:
> 2014-07-08 22:37 GMT+09:00 Daniel Vetter <daniel@ffwll.ch>:
>> I agree that exynos userptr as-is should be removed, since as opposed to
>> the i915 implementation it doesn't play nice with the core mm
>
> Can you give me more details on why you think so?

From a very quick look there are two pieces:
- The implementation with the vma tricks looks _really_ scary. You'd need to
  have Al Viro's opinion on it though.
- If I'm reading the code correctly, userspace can pin unlimited amounts of
  memory, but I've gotten a bit lost in the code. In i915 we have shrinkers
  and an mmu notifier to make sure that if the vm needs this memory again
  we'll make it available.
-Daniel
On 2014년 07월 09일 18:23, Daniel Vetter wrote:
> On Tue, Jul 8, 2014 at 6:20 PM, Inki Dae <inki.dae@samsung.com> wrote:
>> Can you give me more details on why you think so?
>
> From a very quick look there are two pieces:
> - The implementation with the vma tricks looks _really_ scary. You'd need to
>   have Al Viro's opinion on it though.

You mean that it checks the VM_DONTCOPY flag before copying the vma? If so, I
had really forgotten about that.

> - If I'm reading the code correctly, userspace can pin unlimited amounts of
>   memory, but I've gotten a bit lost in the code. In i915 we have

Not so. The g2d driver checks whether the user-requested buffer size exceeds
the maximum capacity of the g2d dma, so it can never pin unlimited amounts of
memory.

>   shrinkers and an mmu notifier to make sure that if the vm needs this
>   memory again we'll make it available.

I am not familiar with the mmu notifier. I will look into how the i915 driver
handles it.

Thanks for the comments,
Inki Dae
On Thu, Jul 10, 2014 at 12:39:24AM +0900, Inki Dae wrote:
> On 2014년 07월 09일 18:23, Daniel Vetter wrote:
> > On Tue, Jul 8, 2014 at 6:20 PM, Inki Dae <inki.dae@samsung.com> wrote:
> >> 2014-07-08 22:37 GMT+09:00 Daniel Vetter <daniel@ffwll.ch>:
> >>> On Wed, Jul 02, 2014 at 11:25:19AM -0400, Jerome Glisse wrote:
> >>>> Anyway as this is upstream I guess you can keep it. This is just a horrible
> >>>> API that allows circumventing any limit set by memcg for page locking and all.
> >>>> But anyway GPU drivers never played in the same ballpark as other drivers.
> >>>
> >>> I agree that exynos userptr as-is should be removed since as opposed to
> >>> the i915 implementation it doesn't play nice with the core mm
> >>
> >> Can you give me more details on why you think so?
> >
> > From a very quick look there are two pieces:
> > - The implementation with the vma tricks looks _really_ scary. You'd
> >   need to have Al Viro's opinion on it though.
>
> You mean the check of the VM_DONTCOPY flag before copying the vma? If
> so, I had really forgotten about it.
>
> > - If I'm reading the code correctly userspace can pin unlimited amounts
> >   of memory, but I've gotten a bit lost in the code. In i915 we have
>
> Not so. The g2d driver checks whether the user-requested buffer size
> exceeds the maximum capacity of the g2d DMA engine, so it can never pin
> unlimited amounts of memory.

Ah, I didn't spot this check. I guess if the g2d dma size is a natural
limit then we're fine. Nowadays on recent i915 hw/sw we can use all of
system memory, so we can't rely on some hw limit to bound the amount of
memory userspace can mlock. But if g2d has such a limit (and there's
enough memory left for the kernel not to fall over) you should be fine.
-Daniel
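Purely to illustrate the kind of bound being discussed, and not code from the driver: a driver that wants an explicit cap, rather than relying on whatever the hardware happens to address, could reject oversized requests before pinning anything. Both the constant and the helper below are assumptions, not documented g2d limits.

#define G2D_MAX_USERPTR_SIZE	SZ_64M	/* assumed cap, not a real register limit */

/* Illustration only: called before any user page is pinned. */
static bool g2d_userptr_size_ok(unsigned long size)
{
	return size && size <= G2D_MAX_USERPTR_SIZE;
}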
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 36535f3..7b55e89 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -233,7 +233,6 @@ struct exynos_drm_g2d_private {
 	struct device		*dev;
 	struct list_head	inuse_cmdlist;
 	struct list_head	event_list;
-	struct list_head	userptr_list;
 };
 
 struct exynos_drm_ipp_private {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 8001587..d0be6dc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -118,9 +118,6 @@
 #define G2D_CMDLIST_POOL_SIZE	(G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
 #define G2D_CMDLIST_DATA_NUM	(G2D_CMDLIST_SIZE / sizeof(u32) - 2)
 
-/* maximum buffer pool size of userptr is 64MB as default */
-#define MAX_POOL		(64 * 1024 * 1024)
-
 enum {
 	BUF_TYPE_GEM = 1,
 	BUF_TYPE_USERPTR,
@@ -185,19 +182,6 @@ struct drm_exynos_pending_g2d_event {
 	struct drm_exynos_g2d_event	event;
 };
 
-struct g2d_cmdlist_userptr {
-	struct list_head	list;
-	dma_addr_t		dma_addr;
-	unsigned long		userptr;
-	unsigned long		size;
-	struct page		**pages;
-	unsigned int		npages;
-	struct sg_table		*sgt;
-	struct vm_area_struct	*vma;
-	atomic_t		refcount;
-	bool			in_pool;
-	bool			out_of_list;
-};
 struct g2d_cmdlist_node {
 	struct list_head	list;
 	struct g2d_cmdlist	*cmdlist;
@@ -242,7 +226,6 @@ struct g2d_data {
 	struct kmem_cache	*runqueue_slab;
 
 	unsigned long			current_pool;
-	unsigned long			max_pool;
 };
 
 static int g2d_init_cmdlist(struct g2d_data *g2d)
@@ -354,232 +337,6 @@ add_to_list:
 	list_add_tail(&node->event->base.link, &g2d_priv->event_list);
 }
 
-static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
-					unsigned long obj,
-					bool force)
-{
-	struct g2d_cmdlist_userptr *g2d_userptr =
-					(struct g2d_cmdlist_userptr *)obj;
-
-	if (!obj)
-		return;
-
-	if (force)
-		goto out;
-
-	atomic_dec(&g2d_userptr->refcount);
-
-	if (atomic_read(&g2d_userptr->refcount) > 0)
-		return;
-
-	if (g2d_userptr->in_pool)
-		return;
-
-out:
-	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
-					DMA_BIDIRECTIONAL);
-
-	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-					g2d_userptr->npages,
-					g2d_userptr->vma);
-
-	exynos_gem_put_vma(g2d_userptr->vma);
-
-	if (!g2d_userptr->out_of_list)
-		list_del_init(&g2d_userptr->list);
-
-	sg_free_table(g2d_userptr->sgt);
-	kfree(g2d_userptr->sgt);
-
-	drm_free_large(g2d_userptr->pages);
-	kfree(g2d_userptr);
-}
-
-static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
-					unsigned long userptr,
-					unsigned long size,
-					struct drm_file *filp,
-					unsigned long *obj)
-{
-	struct drm_exynos_file_private *file_priv = filp->driver_priv;
-	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-	struct g2d_cmdlist_userptr *g2d_userptr;
-	struct g2d_data *g2d;
-	struct page **pages;
-	struct sg_table	*sgt;
-	struct vm_area_struct *vma;
-	unsigned long start, end;
-	unsigned int npages, offset;
-	int ret;
-
-	if (!size) {
-		DRM_ERROR("invalid userptr size.\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	g2d = dev_get_drvdata(g2d_priv->dev);
-
-	/* check if userptr already exists in userptr_list. */
-	list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
-		if (g2d_userptr->userptr == userptr) {
-			/*
-			 * also check size because there could be same address
-			 * and different size.
-			 */
-			if (g2d_userptr->size == size) {
-				atomic_inc(&g2d_userptr->refcount);
-				*obj = (unsigned long)g2d_userptr;
-
-				return &g2d_userptr->dma_addr;
-			}
-
-			/*
-			 * at this moment, maybe g2d dma is accessing this
-			 * g2d_userptr memory region so just remove this
-			 * g2d_userptr object from userptr_list not to be
-			 * referred again and also except it the userptr
-			 * pool to be released after the dma access completion.
-			 */
-			g2d_userptr->out_of_list = true;
-			g2d_userptr->in_pool = false;
-			list_del_init(&g2d_userptr->list);
-
-			break;
-		}
-	}
-
-	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
-	if (!g2d_userptr)
-		return ERR_PTR(-ENOMEM);
-
-	atomic_set(&g2d_userptr->refcount, 1);
-
-	start = userptr & PAGE_MASK;
-	offset = userptr & ~PAGE_MASK;
-	end = PAGE_ALIGN(userptr + size);
-	npages = (end - start) >> PAGE_SHIFT;
-	g2d_userptr->npages = npages;
-
-	pages = drm_calloc_large(npages, sizeof(struct page *));
-	if (!pages) {
-		DRM_ERROR("failed to allocate pages.\n");
-		ret = -ENOMEM;
-		goto err_free;
-	}
-
-	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm, userptr);
-	if (!vma) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("failed to get vm region.\n");
-		ret = -EFAULT;
-		goto err_free_pages;
-	}
-
-	if (vma->vm_end < userptr + size) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("vma is too small.\n");
-		ret = -EFAULT;
-		goto err_free_pages;
-	}
-
-	g2d_userptr->vma = exynos_gem_get_vma(vma);
-	if (!g2d_userptr->vma) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("failed to copy vma.\n");
-		ret = -ENOMEM;
-		goto err_free_pages;
-	}
-
-	g2d_userptr->size = size;
-
-	ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
-						npages, pages, vma);
-	if (ret < 0) {
-		up_read(&current->mm->mmap_sem);
-		DRM_ERROR("failed to get user pages from userptr.\n");
-		goto err_put_vma;
-	}
-
-	up_read(&current->mm->mmap_sem);
-	g2d_userptr->pages = pages;
-
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt) {
-		ret = -ENOMEM;
-		goto err_free_userptr;
-	}
-
-	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
-					size, GFP_KERNEL);
-	if (ret < 0) {
-		DRM_ERROR("failed to get sgt from pages.\n");
-		goto err_free_sgt;
-	}
-
-	g2d_userptr->sgt = sgt;
-
-	ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
-						DMA_BIDIRECTIONAL);
-	if (ret < 0) {
-		DRM_ERROR("failed to map sgt with dma region.\n");
-		goto err_sg_free_table;
-	}
-
-	g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
-	g2d_userptr->userptr = userptr;
-
-	list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
-
-	if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
-		g2d->current_pool += npages << PAGE_SHIFT;
-		g2d_userptr->in_pool = true;
-	}
-
-	*obj = (unsigned long)g2d_userptr;
-
-	return &g2d_userptr->dma_addr;
-
-err_sg_free_table:
-	sg_free_table(sgt);
-
-err_free_sgt:
-	kfree(sgt);
-
-err_free_userptr:
-	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-					g2d_userptr->npages,
-					g2d_userptr->vma);
-
-err_put_vma:
-	exynos_gem_put_vma(g2d_userptr->vma);
-
-err_free_pages:
-	drm_free_large(pages);
-
-err_free:
-	kfree(g2d_userptr);
-
-	return ERR_PTR(ret);
-}
-
-static void g2d_userptr_free_all(struct drm_device *drm_dev,
-					struct g2d_data *g2d,
-					struct drm_file *filp)
-{
-	struct drm_exynos_file_private *file_priv = filp->driver_priv;
-	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-	struct g2d_cmdlist_userptr *g2d_userptr, *n;
-
-	list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
-		if (g2d_userptr->in_pool)
-			g2d_userptr_put_dma_addr(drm_dev,
-						(unsigned long)g2d_userptr,
-						true);
-
-	g2d->current_pool = 0;
-}
-
 static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
 {
 	enum g2d_reg_type reg_type;
@@ -734,29 +491,8 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
 				goto err;
 			}
 		} else {
-			struct drm_exynos_g2d_userptr g2d_userptr;
-
-			if (copy_from_user(&g2d_userptr, (void __user *)handle,
-				sizeof(struct drm_exynos_g2d_userptr))) {
-				ret = -EFAULT;
-				goto err;
-			}
-
-			if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
-							g2d_userptr.size)) {
-				ret = -EFAULT;
-				goto err;
-			}
-
-			addr = g2d_userptr_get_dma_addr(drm_dev,
-							g2d_userptr.userptr,
-							g2d_userptr.size,
-							file,
-							&handle);
-			if (IS_ERR(addr)) {
-				ret = -EFAULT;
-				goto err;
-			}
+			ret = -EFAULT;
+			goto err;
 		}
 
 		cmdlist->data[reg_pos + 1] = *addr;
@@ -793,8 +529,7 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
 			exynos_drm_gem_put_dma_addr(subdrv->drm_dev,
 							handle, filp);
 		else
-			g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
-							false);
+			BUG();
 
 		buf_info->reg_types[i] = REG_TYPE_NONE;
 		buf_info->handles[reg_type] = 0;
@@ -1329,7 +1064,6 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
 
 	INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
 	INIT_LIST_HEAD(&g2d_priv->event_list);
-	INIT_LIST_HEAD(&g2d_priv->userptr_list);
 
 	return 0;
 }
@@ -1363,9 +1097,6 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
 	}
 	mutex_unlock(&g2d->cmdlist_mutex);
 
-	/* release all g2d_userptr in pool. */
-	g2d_userptr_free_all(drm_dev, g2d, file);
-
 	kfree(file_priv->g2d_priv);
 }
 
@@ -1433,8 +1164,6 @@ static int g2d_probe(struct platform_device *pdev)
 		goto err_put_clk;
 	}
 
-	g2d->max_pool = MAX_POOL;
-
 	platform_set_drvdata(pdev, g2d);
 
 	subdrv = &g2d->subdrv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 163a054..cb624d9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -496,66 +496,6 @@ void exynos_gem_put_vma(struct vm_area_struct *vma)
 	kfree(vma);
 }
 
-int exynos_gem_get_pages_from_userptr(unsigned long start,
-						unsigned int npages,
-						struct page **pages,
-						struct vm_area_struct *vma)
-{
-	int get_npages;
-
-	/* the memory region mmaped with VM_PFNMAP. */
-	if (vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
-			unsigned long pfn;
-			int ret = follow_pfn(vma, start, &pfn);
-			if (ret)
-				return ret;
-
-			pages[i] = pfn_to_page(pfn);
-		}
-
-		if (i != npages) {
-			DRM_ERROR("failed to get user_pages.\n");
-			return -EINVAL;
-		}
-
-		return 0;
-	}
-
-	get_npages = get_user_pages(current, current->mm, start,
-					npages, 1, 1, pages, NULL);
-	get_npages = max(get_npages, 0);
-	if (get_npages != npages) {
-		DRM_ERROR("failed to get user_pages.\n");
-		while (get_npages)
-			put_page(pages[--get_npages]);
-		return -EFAULT;
-	}
-
-	return 0;
-}
-
-void exynos_gem_put_pages_to_userptr(struct page **pages,
-					unsigned int npages,
-					struct vm_area_struct *vma)
-{
-	if (!vma_is_io(vma)) {
-		unsigned int i;
-
-		for (i = 0; i < npages; i++) {
-			set_page_dirty_lock(pages[i]);
-
-			/*
-			 * undo the reference we took when populating
-			 * the table.
-			 */
-			put_page(pages[i]);
-		}
-	}
-}
-
 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
 				struct sg_table *sgt,
 				enum dma_data_direction dir)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 1592c0b..07c00a3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -21,7 +21,6 @@
  * exynos drm gem buffer structure.
  *
  * @kvaddr: kernel virtual address to allocated memory region.
- *	*userptr: user space address.
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *	- this address could be physical address without IOMMU and
  *	device address with IOMMU.
@@ -29,19 +28,15 @@
  * @pages: Array of backing pages.
  * @sgt: sg table to transfer page data.
  * @size: size of allocated memory region.
- * @pfnmap: indicate whether memory region from userptr is mmaped with
- *	VM_PFNMAP or not.
  */
 struct exynos_drm_gem_buf {
 	void __iomem		*kvaddr;
-	unsigned long		userptr;
 	dma_addr_t		dma_addr;
 	struct dma_attrs	dma_attrs;
 	unsigned int		write;
 	struct page		**pages;
 	struct sg_table		*sgt;
 	unsigned long		size;
-	bool			pfnmap;
 };
 
 /*
@@ -125,10 +120,6 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int exynos_drm_gem_mmap_buffer(struct file *filp,
 				      struct vm_area_struct *vma);
 
-/* map user space allocated by malloc to pages. */
-int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
-				      struct drm_file *file_priv);
-
 /* get buffer information to memory region allocated by gem. */
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
@@ -168,17 +159,6 @@ struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
 /* release a userspace virtual memory area. */
 void exynos_gem_put_vma(struct vm_area_struct *vma);
 
-/* get pages from user space. */
-int exynos_gem_get_pages_from_userptr(unsigned long start,
-						unsigned int npages,
-						struct page **pages,
-						struct vm_area_struct *vma);
-
-/* drop the reference to pages. */
-void exynos_gem_put_pages_to_userptr(struct page **pages,
-					unsigned int npages,
-					struct vm_area_struct *vma);
-
 /* map sgt with dma region. */
 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
 				struct sg_table *sgt,