diff mbox series

[v5,4/5] drm: Add library for shmem backed GEM objects

Message ID 20181017130454.44292-5-noralf@tronnes.org (mailing list archive)
State New, archived
Headers show
Series drm: Add shmem GEM library | expand

Commit Message

Noralf Trønnes Oct. 17, 2018, 1:04 p.m. UTC
This adds a library for shmem backed GEM objects.

v5:
- Drop drm_gem_shmem_prime_mmap() (Daniel Vetter)
- drm_gem_shmem_mmap(): Subtract drm_vma_node_start() to get the real
  vma->vm_pgoff
- drm_gem_shmem_fault(): Use vmf->pgoff now that vma->vm_pgoff is correct

v4:
- Drop cache modes (Thomas Hellstrom)
- Add a GEM attached vtable

v3:
- Grammar (Sam Ravnborg)
- s/drm_gem_shmem_put_pages_unlocked/drm_gem_shmem_put_pages_locked/
  (Sam Ravnborg)
- Add debug output in error path (Sam Ravnborg)

Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
---
 Documentation/gpu/drm-kms-helpers.rst  |  12 +
 drivers/gpu/drm/Kconfig                |   6 +
 drivers/gpu/drm/Makefile               |   1 +
 drivers/gpu/drm/drm_gem_shmem_helper.c | 551 +++++++++++++++++++++++++++++++++
 include/drm/drm_gem_shmem_helper.h     | 153 +++++++++
 5 files changed, 723 insertions(+)
 create mode 100644 drivers/gpu/drm/drm_gem_shmem_helper.c
 create mode 100644 include/drm/drm_gem_shmem_helper.h

Comments

Daniel Vetter Oct. 17, 2018, 3:46 p.m. UTC | #1
On Wed, Oct 17, 2018 at 03:04:53PM +0200, Noralf Trønnes wrote:
> This adds a library for shmem backed GEM objects.
> 
> v5:
> - Drop drm_gem_shmem_prime_mmap() (Daniel Vetter)
> - drm_gem_shmem_mmap(): Subtract drm_vma_node_start() to get the real
>   vma->vm_pgoff
> - drm_gem_shmem_fault(): Use vmf->pgoff now that vma->vm_pgoff is correct
> 
> v4:
> - Drop cache modes (Thomas Hellstrom)
> - Add a GEM attached vtable
> 
> v3:
> - Grammar (Sam Ravnborg)
> - s/drm_gem_shmem_put_pages_unlocked/drm_gem_shmem_put_pages_locked/
>   (Sam Ravnborg)
> - Add debug output in error path (Sam Ravnborg)
> 
> Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
> ---
>  Documentation/gpu/drm-kms-helpers.rst  |  12 +
>  drivers/gpu/drm/Kconfig                |   6 +
>  drivers/gpu/drm/Makefile               |   1 +
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 551 +++++++++++++++++++++++++++++++++
>  include/drm/drm_gem_shmem_helper.h     | 153 +++++++++
>  5 files changed, 723 insertions(+)
>  create mode 100644 drivers/gpu/drm/drm_gem_shmem_helper.c
>  create mode 100644 include/drm/drm_gem_shmem_helper.h
> 
> diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
> index 4b4dc236ef6f..8305d3566928 100644
> --- a/Documentation/gpu/drm-kms-helpers.rst
> +++ b/Documentation/gpu/drm-kms-helpers.rst
> @@ -335,3 +335,15 @@ Legacy CRTC/Modeset Helper Functions Reference
>  
>  .. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
>     :export:
> +
> +SHMEM GEM Helper Reference
> +==========================
> +
> +.. kernel-doc:: drivers/gpu/drm/drm_gem_shmem_helper.c
> +   :doc: overview
> +
> +.. kernel-doc:: include/drm/drm_gem_shmem_helper.h
> +   :internal:
> +
> +.. kernel-doc:: drivers/gpu/drm/drm_gem_shmem_helper.c
> +   :export:

This doesn't make sense here imo - put them right next to the cma helpers
in drm-mm.rst instead?

> diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
> index 736b7e67e4ec..46090a36f52e 100644
> --- a/drivers/gpu/drm/Kconfig
> +++ b/drivers/gpu/drm/Kconfig
> @@ -157,6 +157,12 @@ config DRM_KMS_CMA_HELPER
>  	help
>  	  Choose this if you need the KMS CMA helper functions
>  
> +config DRM_GEM_SHMEM_HELPER
> +	bool
> +	depends on DRM
> +	help
> +	  Choose this if you need the GEM shmem helper functions
> +
>  config DRM_VM
>  	bool
>  	depends on DRM && MMU
> diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
> index 576ba985e138..8733ceb41292 100644
> --- a/drivers/gpu/drm/Makefile
> +++ b/drivers/gpu/drm/Makefile
> @@ -25,6 +25,7 @@ drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
>  drm-$(CONFIG_DRM_VM) += drm_vm.o
>  drm-$(CONFIG_COMPAT) += drm_ioc32.o
>  drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
> +drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
>  drm-$(CONFIG_PCI) += ati_pcigart.o
>  drm-$(CONFIG_DRM_PANEL) += drm_panel.o
>  drm-$(CONFIG_OF) += drm_of.o
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
> new file mode 100644
> index 000000000000..c4eec51d3282
> --- /dev/null
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -0,0 +1,551 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright 2018 Noralf Trønnes
> + */
> +
> +#include <linux/dma-buf.h>
> +#include <linux/export.h>
> +#include <linux/mutex.h>
> +#include <linux/shmem_fs.h>
> +#include <linux/slab.h>
> +#include <linux/vmalloc.h>
> +
> +#include <drm/drm_device.h>
> +#include <drm/drm_drv.h>
> +#include <drm/drm_gem_shmem_helper.h>
> +#include <drm/drm_prime.h>
> +#include <drm/drm_print.h>
> +
> +/**
> + * DOC: overview
> + *
> + * This library provides helpers for GEM objects backed by shmem buffers
> + * allocated using anonymous pageable memory.

Would be neat to insert a link to the cma version here (and to the gem
version in the cma helpers) and spend a few words on when to use each.

> + */
> +
> +static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
> +	.free = drm_gem_shmem_free_object,
> +	.print_info = drm_gem_shmem_print_info,
> +	.pin = drm_gem_shmem_pin,
> +	.unpin = drm_gem_shmem_unpin,
> +	.get_sg_table = drm_gem_shmem_get_sg_table,
> +	.vmap = drm_gem_shmem_vmap,
> +	.vunmap = drm_gem_shmem_vunmap,
> +	.vm_ops = &drm_gem_shmem_vm_ops,
> +};
> +
> +/**
> + * drm_gem_shmem_create - Allocate an object with the given size
> + * @dev: DRM device
> + * @size: Size of the object to allocate
> + *
> + * This function creates a shmem GEM object.
> + *
> + * Returns:
> + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
> + * error code on failure.
> + */
> +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
> +{
> +	struct drm_gem_shmem_object *shmem;
> +	struct drm_gem_object *obj;
> +	int ret;
> +
> +	size = PAGE_ALIGN(size);
> +
> +	if (dev->driver->gem_create_object)
> +		obj = dev->driver->gem_create_object(dev, size);
> +	else
> +		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
> +	if (!obj)
> +		return ERR_PTR(-ENOMEM);
> +
> +	if (!obj->funcs)
> +		obj->funcs = &drm_gem_shmem_funcs;
> +
> +	ret = drm_gem_object_init(dev, obj, size);
> +	if (ret)
> +		goto err_free;
> +
> +	ret = drm_gem_create_mmap_offset(obj);
> +	if (ret)
> +		goto err_release;
> +
> +	shmem = to_drm_gem_shmem_obj(obj);
> +	mutex_init(&shmem->pages_lock);
> +	mutex_init(&shmem->vmap_lock);
> +
> +	return shmem;
> +
> +err_release:
> +	drm_gem_object_release(obj);
> +err_free:
> +	kfree(shmem);
> +
> +	return ERR_PTR(ret);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
> +
> +/**
> + * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
> + * @obj: GEM object to free
> + *
> + * This function cleans up the GEM object state and frees the memory used to
> + * store the object itself.
> + */
> +void drm_gem_shmem_free_object(struct drm_gem_object *obj)
> +{
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +	WARN_ON(shmem->vmap_use_count);
> +
> +	if (obj->import_attach) {
> +		shmem->pages_use_count--;
> +		drm_prime_gem_destroy(obj, shmem->sgt);
> +		kvfree(shmem->pages);
> +	}
> +
> +	WARN_ON(shmem->pages_use_count);
> +
> +	drm_gem_object_release(obj);
> +	mutex_destroy(&shmem->pages_lock);
> +	mutex_destroy(&shmem->vmap_lock);
> +	kfree(shmem);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);
> +
> +static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
> +{
> +	struct drm_gem_object *obj = &shmem->base;
> +	struct page **pages;
> +
> +	if (shmem->pages_use_count++ > 0)
> +		return 0;
> +
> +	pages = drm_gem_get_pages(obj);

I think moving the drm_gem_get/put_pages into the shmem helper here would
be a good cleanup. Plus aligning the prefix to drm_gem_shmem - they really
only work with shmem, not cma gem bo.

> +	if (IS_ERR(pages)) {
> +		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
> +		shmem->pages_use_count = 0;
> +		return PTR_ERR(pages);
> +	}
> +
> +	shmem->pages = pages;
> +
> +	return 0;
> +}
> +
> +/*
> + * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
> + * @shmem: shmem GEM object
> + *
> + * This function makes sure that backing pages exist for the shmem GEM object
> + * and increases the use count.
> + *
> + * Returns:
> + * 0 on success or a negative error code on failure.
> + */
> +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
> +{
> +	int ret;
> +
> +	ret = mutex_lock_interruptible(&shmem->pages_lock);
> +	if (ret)
> +		return ret;
> +	ret = drm_gem_shmem_get_pages_locked(shmem);
> +	mutex_unlock(&shmem->pages_lock);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_get_pages);
> +
> +static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
> +{
> +	struct drm_gem_object *obj = &shmem->base;
> +
> +	if (WARN_ON_ONCE(!shmem->pages_use_count))
> +		return;
> +
> +	if (--shmem->pages_use_count > 0)
> +		return;
> +
> +	drm_gem_put_pages(obj, shmem->pages,
> +			  shmem->pages_mark_dirty_on_put,
> +			  shmem->pages_mark_accessed_on_put);
> +	shmem->pages = NULL;
> +}
> +
> +/*
> + * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
> + * @shmem: shmem GEM object
> + *
> + * This function decreases the use count and puts the backing pages when use drops to zero.
> + */
> +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
> +{
> +	mutex_lock(&shmem->pages_lock);
> +	drm_gem_shmem_put_pages_locked(shmem);
> +	mutex_unlock(&shmem->pages_lock);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_put_pages);
> +
> +/**
> + * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
> + * @obj: GEM object
> + *
> + * This function makes sure the backing pages are pinned in memory while the
> + * buffer is exported.
> + *
> + * Returns:
> + * 0 on success or a negative error code on failure.
> + */
> +int drm_gem_shmem_pin(struct drm_gem_object *obj)
> +{
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +	return drm_gem_shmem_get_pages(shmem);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_pin);
> +
> +/**
> + * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
> + * @obj: GEM object
> + *
> + * This function removes the requirement that the backing pages are pinned in
> + * memory.
> + */
> +void drm_gem_shmem_unpin(struct drm_gem_object *obj)
> +{
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +	drm_gem_shmem_put_pages(shmem);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_unpin);
> +
> +static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
> +{
> +	struct drm_gem_object *obj = &shmem->base;
> +	int ret;
> +
> +	if (shmem->vmap_use_count++ > 0)
> +		return shmem->vaddr;
> +
> +	ret = drm_gem_shmem_get_pages(shmem);
> +	if (ret)
> +		goto err_zero_use;
> +
> +	if (obj->import_attach)
> +		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
> +	else
> +		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
> +
> +	if (!shmem->vaddr) {
> +		DRM_DEBUG_KMS("Failed to vmap pages\n");
> +		ret = -ENOMEM;
> +		goto err_put_pages;
> +	}
> +
> +	return shmem->vaddr;
> +
> +err_put_pages:
> +	drm_gem_shmem_put_pages(shmem);
> +err_zero_use:
> +	shmem->vmap_use_count = 0;
> +
> +	return ERR_PTR(ret);
> +}
> +
> +/*
> + * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
> + * @shmem: shmem GEM object
> + *
> + * This function makes sure that a virtual address exists for the buffer backing
> + * the shmem GEM object.
> + *
> + * Returns:
> + * The kernel virtual address on success or an ERR_PTR()-encoded negative
> + * error code on failure.
> + */
> +void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
> +{
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +	void *vaddr;
> +	int ret;
> +
> +	ret = mutex_lock_interruptible(&shmem->vmap_lock);
> +	if (ret)
> +		return ERR_PTR(ret);
> +	vaddr = drm_gem_shmem_vmap_locked(shmem);
> +	mutex_unlock(&shmem->vmap_lock);
> +
> +	return vaddr;
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_vmap);
> +
> +static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
> +{
> +	struct drm_gem_object *obj = &shmem->base;
> +
> +	if (WARN_ON_ONCE(!shmem->vmap_use_count))
> +		return;
> +
> +	if (--shmem->vmap_use_count > 0)
> +		return;
> +
> +	if (obj->import_attach)
> +		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
> +	else
> +		vunmap(shmem->vaddr);
> +
> +	shmem->vaddr = NULL;
> +	drm_gem_shmem_put_pages(shmem);
> +}
> +
> +/*
> + * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
> + * @shmem: shmem GEM object
> + *
> + * This function removes the virtual address when use count drops to zero.
> + */
> +void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
> +{
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +	mutex_lock(&shmem->vmap_lock);
> +	drm_gem_shmem_vunmap_locked(shmem);
> +	mutex_unlock(&shmem->vmap_lock);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_vunmap);
> +
> +static struct drm_gem_shmem_object *
> +drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
> +				 struct drm_device *dev, size_t size,
> +				 uint32_t *handle)
> +{
> +	struct drm_gem_shmem_object *shmem;
> +	int ret;
> +
> +	shmem = drm_gem_shmem_create(dev, size);
> +	if (IS_ERR(shmem))
> +		return shmem;
> +
> +	/*
> +	 * Allocate an id in the idr table where the obj is registered
> +	 * and the handle holds the id that userspace can see.
> +	 */
> +	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
> +	/* drop reference from allocate - handle holds it now. */
> +	drm_gem_object_put_unlocked(&shmem->base);
> +	if (ret)
> +		return ERR_PTR(ret);
> +
> +	return shmem;
> +}
> +
> +/**
> + * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
> + * @file: DRM file structure to create the dumb buffer for
> + * @dev: DRM device
> + * @args: IOCTL data
> + *
> + * This function computes the pitch of the dumb buffer and rounds it up to an
> + * integer number of bytes per pixel. Drivers for hardware that doesn't have
> + * any additional restrictions on the pitch can directly use this function as
> + * their &drm_driver.dumb_create callback.
> + *
> + * For hardware with additional restrictions, drivers can adjust the fields
> + * set up by userspace before calling into this function.
> + *
> + * Returns:
> + * 0 on success or a negative error code on failure.
> + */
> +int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
> +			      struct drm_mode_create_dumb *args)
> +{
> +	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
> +	struct drm_gem_shmem_object *shmem;
> +
> +	if (!args->pitch || !args->size) {
> +		args->pitch = min_pitch;
> +		args->size = args->pitch * args->height;
> +	} else {
> +		/* ensure sane minimum values */
> +		if (args->pitch < min_pitch)
> +			args->pitch = min_pitch;
> +		if (args->size < args->pitch * args->height)
> +			args->size = args->pitch * args->height;
> +	}
> +
> +	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
> +
> +	return PTR_ERR_OR_ZERO(shmem);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
> +
> +static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> +{
> +	struct vm_area_struct *vma = vmf->vma;
> +	struct drm_gem_object *obj = vma->vm_private_data;
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +	loff_t num_pages = obj->size >> PAGE_SHIFT;
> +	struct page *page;
> +
> +	if (vmf->pgoff > num_pages || WARN_ON_ONCE(!shmem->pages))
> +		return VM_FAULT_SIGBUS;
> +
> +	page = shmem->pages[vmf->pgoff];
> +
> +	return vmf_insert_page(vma, vmf->address, page);
> +}
> +
> +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> +{
> +	struct drm_gem_object *obj = vma->vm_private_data;
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +	drm_gem_shmem_put_pages(shmem);

Imbalance get/put_pages here, if someone forks (which is what calls
vm_ops->open on an existing vma).

> +	drm_gem_vm_close(vma);
> +}
> +
> +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
> +	.fault = drm_gem_shmem_fault,
> +	.open = drm_gem_vm_open,
> +	.close = drm_gem_shmem_vm_close,
> +};
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
> +
> +/**
> + * drm_gem_shmem_mmap - Memory-map a shmem GEM object
> + * @filp: File object
> + * @vma: VMA for the area to be mapped
> + *
> + * This function implements an augmented version of the GEM DRM file mmap
> + * operation for shmem objects. Drivers which employ the shmem helpers should
> + * use this function as their &file_operations.mmap handler in the DRM device file's
> + * file_operations structure.
> + *
> + * Instead of directly referencing this function, drivers should use the
> + * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
> + *
> + * Returns:
> + * 0 on success or a negative error code on failure.
> + */

Between cma helpers, gem shmem helpers and the drm_gem_mmap we now have
quite a bit a confusion of mmap functions. I think it'd be good to update
the kerneldoc for drm_gem_mmap() to point at these here (both the shmem
and cma version), so that people prefer to use these helpers here. Direct
call to drm_gem_mmap would only be needed if you want to use your own
vm_ops.

> +int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +	struct drm_gem_shmem_object *shmem;
> +	int ret;
> +
> +	ret = drm_gem_mmap(filp, vma);
> +	if (ret)
> +		return ret;
> +
> +	shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
> +
> +	ret = drm_gem_shmem_get_pages(shmem);
> +	if (ret) {
> +		drm_gem_vm_close(vma);
> +		return ret;
> +	}
> +
> +	/* VM_PFNMAP was set by drm_gem_mmap() */
> +	vma->vm_flags &= ~VM_PFNMAP;
> +	vma->vm_flags |= VM_MIXEDMAP;
> +	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
> +
> +	fput(vma->vm_file);
> +	vma->vm_file = get_file(shmem->base.filp);
> +	/* Remove the fake offset */
> +	vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

Looking through your mmap code there's 2 bits:
- shmem isn't coherent, if you try to actually buffer share this with
  prime I expect it'll fail badly. We'd need begin/end_cpu_access
  callbacks in dma_buf_ops to make this work. So not entirely sure how
  much value there is in implementing the prime mmap stuff (but doesn't
  hurt to have it).

- shmem already has an mmap implemtation, you can just use that. Needs a
  similar mmap forwarding trick as you've done for prime mmap, but instead
  you forward to obj->base.filp. That allows you to cut away all the
  vm_ops and everything. I guess we could clean this up in a follow-up,
  since it doesn't have an effect on the interfaces exposed to drivers. At
  least not a big one, drm_gem_shmem_vm_ops would disappear.

> +
> +/**
> + * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
> + * @p: DRM printer
> + * @indent: Tab indentation level
> + * @obj: GEM object

Would be good to reference the hook this is meant for:
&drm_gem_object_funcs.print_info, same for all the others. For those
drivers that want to pick&choose.

Cheers, Daniel

> + */
> +void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
> +			      const struct drm_gem_object *obj)
> +{
> +	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
> +	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
> +	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_print_info);
> +
> +/**
> + * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
> + *                              pages for a shmem GEM object
> + * @obj: GEM object
> + *
> + * This function exports a scatter/gather table suitable for PRIME usage by
> + * calling the standard DMA mapping API.
> + *
> + * Returns:
> + * A pointer to the scatter/gather table of pinned pages or NULL on failure.
> + */
> +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
> +{
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
> +
> +/**
> + * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
> + *                 another driver's scatter/gather table of pinned pages
> + * @dev: Device to import into
> + * @attach: DMA-BUF attachment
> + * @sgt: Scatter/gather table of pinned pages
> + *
> + * This function imports a scatter/gather table exported via DMA-BUF by
> + * another driver. Drivers that use the shmem helpers should set this as their
> + * &drm_driver.gem_prime_import_sg_table callback.
> + *
> + * Returns:
> + * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
> + * error code on failure.
> + */
> +struct drm_gem_object *
> +drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
> +				    struct dma_buf_attachment *attach,
> +				    struct sg_table *sgt)
> +{
> +	size_t size = PAGE_ALIGN(attach->dmabuf->size);
> +	size_t npages = size >> PAGE_SHIFT;
> +	struct drm_gem_shmem_object *shmem;
> +	int ret;
> +
> +	shmem = drm_gem_shmem_create(dev, size);
> +	if (IS_ERR(shmem))
> +		return ERR_CAST(shmem);
> +
> +	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
> +	if (!shmem->pages) {
> +		ret = -ENOMEM;
> +		goto err_free_gem;
> +	}
> +
> +	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
> +	if (ret < 0)
> +		goto err_free_array;
> +
> +	shmem->sgt = sgt;
> +	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
> +
> +	DRM_DEBUG_PRIME("size = %zu\n", size);
> +
> +	return &shmem->base;
> +
> +err_free_array:
> +	kvfree(shmem->pages);
> +err_free_gem:
> +	drm_gem_object_put_unlocked(&shmem->base);
> +
> +	return ERR_PTR(ret);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> new file mode 100644
> index 000000000000..26b05e06407d
> --- /dev/null
> +++ b/include/drm/drm_gem_shmem_helper.h
> @@ -0,0 +1,153 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef __DRM_GEM_SHMEM_HELPER_H__
> +#define __DRM_GEM_SHMEM_HELPER_H__
> +
> +#include <linux/fs.h>
> +#include <linux/mm.h>
> +#include <linux/mutex.h>
> +
> +#include <drm/drm_file.h>
> +#include <drm/drm_gem.h>
> +#include <drm/drm_ioctl.h>
> +#include <drm/drm_prime.h>
> +
> +struct dma_buf_attachment;
> +struct drm_mode_create_dumb;
> +struct drm_printer;
> +struct sg_table;
> +
> +/**
> + * struct drm_gem_shmem_object - GEM object backed by shmem
> + */
> +struct drm_gem_shmem_object {
> +	/**
> +	 * @base: Base GEM object
> +	 */
> +	struct drm_gem_object base;
> +
> +	/**
> +	 * @pages_lock: Protects the page table and use count
> +	 */
> +	struct mutex pages_lock;
> +
> +	/**
> +	 * @pages: Page table
> +	 */
> +	struct page **pages;
> +
> +	/**
> +	 * @pages_use_count:
> +	 *
> +	 * Reference count on the pages table.
> +	 * The pages are put when the count reaches zero.
> +	 */
> +	unsigned int pages_use_count;
> +
> +	/**
> +	 * @pages_mark_dirty_on_put:
> +	 *
> +	 * Mark pages as dirty when they are put.
> +	 */
> +	unsigned int pages_mark_dirty_on_put    : 1;
> +
> +	/**
> +	 * @pages_mark_accessed_on_put:
> +	 *
> +	 * Mark pages as accessed when they are put.
> +	 */
> +	unsigned int pages_mark_accessed_on_put : 1;
> +
> +	/**
> +	 * @sgt: Scatter/gather table for imported PRIME buffers
> +	 */
> +	struct sg_table *sgt;
> +
> +	/**
> +	 * @vmap_lock: Protects the vmap address and use count
> +	 */
> +	struct mutex vmap_lock;
> +
> +	/**
> +	 * @vaddr: Kernel virtual address of the backing memory
> +	 */
> +	void *vaddr;
> +
> +	/**
> +	 * @vmap_use_count:
> +	 *
> +	 * Reference count on the virtual address.
> +	 * The address is unmapped when the count reaches zero.
> +	 */
> +	unsigned int vmap_use_count;
> +};
> +
> +#define to_drm_gem_shmem_obj(obj) \
> +	container_of(obj, struct drm_gem_shmem_object, base)
> +
> +/**
> + * DEFINE_DRM_GEM_SHMEM_FOPS() - Macro to generate file operations for shmem drivers
> + * @name: name for the generated structure
> + *
> + * This macro autogenerates a suitable &struct file_operations for shmem based
> + * drivers, which can be assigned to &drm_driver.fops. Note that this structure
> + * cannot be shared between drivers, because it contains a reference to the
> + * current module using THIS_MODULE.
> + *
> + * Note that the declaration is already marked as static - if you need a
> + * non-static version of this you're probably doing it wrong and will break the
> + * THIS_MODULE reference by accident.
> + */
> +#define DEFINE_DRM_GEM_SHMEM_FOPS(name) \
> +	static const struct file_operations name = {\
> +		.owner		= THIS_MODULE,\
> +		.open		= drm_open,\
> +		.release	= drm_release,\
> +		.unlocked_ioctl	= drm_ioctl,\
> +		.compat_ioctl	= drm_compat_ioctl,\
> +		.poll		= drm_poll,\
> +		.read		= drm_read,\
> +		.llseek		= noop_llseek,\
> +		.mmap		= drm_gem_shmem_mmap, \
> +	}
> +
> +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
> +void drm_gem_shmem_free_object(struct drm_gem_object *obj);
> +
> +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
> +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
> +int drm_gem_shmem_pin(struct drm_gem_object *obj);
> +void drm_gem_shmem_unpin(struct drm_gem_object *obj);
> +void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
> +void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
> +
> +int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
> +			      struct drm_mode_create_dumb *args);
> +
> +int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma);
> +
> +extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
> +
> +void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
> +			      const struct drm_gem_object *obj);
> +
> +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);
> +struct drm_gem_object *
> +drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
> +				    struct dma_buf_attachment *attach,
> +				    struct sg_table *sgt);
> +
> +/**
> + * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
> + *
> + * This macro provides a shortcut for setting the shmem GEM operations in
> + * the &drm_driver structure.
> + */
> +#define DRM_GEM_SHMEM_DRIVER_OPS \
> +	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
> +	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
> +	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
> +	.gem_prime_mmap		= drm_gem_prime_mmap, \
> +	.dumb_create		= drm_gem_shmem_dumb_create
> +
> +#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
> -- 
> 2.15.1
> 
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
Noralf Trønnes Oct. 22, 2018, 2:15 p.m. UTC | #2
Den 17.10.2018 17.46, skrev Daniel Vetter:
> On Wed, Oct 17, 2018 at 03:04:53PM +0200, Noralf Trønnes wrote:
>> This adds a library for shmem backed GEM objects.
>>
>> v5:
>> - Drop drm_gem_shmem_prime_mmap() (Daniel Vetter)
>> - drm_gem_shmem_mmap(): Subtract drm_vma_node_start() to get the real
>>    vma->vm_pgoff
>> - drm_gem_shmem_fault(): Use vmf->pgoff now that vma->vm_pgoff is correct
>>
>> v4:
>> - Drop cache modes (Thomas Hellstrom)
>> - Add a GEM attached vtable
>>
>> v3:
>> - Grammar (Sam Ravnborg)
>> - s/drm_gem_shmem_put_pages_unlocked/drm_gem_shmem_put_pages_locked/
>>    (Sam Ravnborg)
>> - Add debug output in error path (Sam Ravnborg)
>>
>> Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
>> ---
>>   Documentation/gpu/drm-kms-helpers.rst  |  12 +
>>   drivers/gpu/drm/Kconfig                |   6 +
>>   drivers/gpu/drm/Makefile               |   1 +
>>   drivers/gpu/drm/drm_gem_shmem_helper.c | 551 +++++++++++++++++++++++++++++++++
>>   include/drm/drm_gem_shmem_helper.h     | 153 +++++++++
>>   5 files changed, 723 insertions(+)
>>   create mode 100644 drivers/gpu/drm/drm_gem_shmem_helper.c
>>   create mode 100644 include/drm/drm_gem_shmem_helper.h
>>

<snip>

>> +static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
>> +{
>> +	struct vm_area_struct *vma = vmf->vma;
>> +	struct drm_gem_object *obj = vma->vm_private_data;
>> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>> +	loff_t num_pages = obj->size >> PAGE_SHIFT;
>> +	struct page *page;
>> +
>> +	if (vmf->pgoff > num_pages || WARN_ON_ONCE(!shmem->pages))
>> +		return VM_FAULT_SIGBUS;
>> +
>> +	page = shmem->pages[vmf->pgoff];
>> +
>> +	return vmf_insert_page(vma, vmf->address, page);
>> +}
>> +
>> +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
>> +{
>> +	struct drm_gem_object *obj = vma->vm_private_data;
>> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>> +
>> +	drm_gem_shmem_put_pages(shmem);
> Imbalance get/put_pages here, if someone forks (which is what calls
> vm_ops->open on an existing vma).
>
>> +	drm_gem_vm_close(vma);
>> +}
>> +
>> +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
>> +	.fault = drm_gem_shmem_fault,
>> +	.open = drm_gem_vm_open,
>> +	.close = drm_gem_shmem_vm_close,
>> +};
>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
>> +
>> +/**
>> + * drm_gem_shmem_mmap - Memory-map a shmem GEM object
>> + * @filp: File object
>> + * @vma: VMA for the area to be mapped
>> + *
>> + * This function implements an augmented version of the GEM DRM file mmap
>> + * operation for shmem objects. Drivers which employ the shmem helpers should
>> + * use this function as their &file_operations.mmap handler in the DRM device file's
>> + * file_operations structure.
>> + *
>> + * Instead of directly referencing this function, drivers should use the
>> + * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
>> + *
>> + * Returns:
>> + * 0 on success or a negative error code on failure.
>> + */
> Between cma helpers, gem shmem helpers and the drm_gem_mmap we now have
> quite a bit a confusion of mmap functions. I think it'd be good to update
> the kerneldoc for drm_gem_mmap() to point at these here (both the shmem
> and cma version), so that people prefer to use these helpers here. Direct
> call to drm_gem_mmap would only be needed if you want to use your own
> vm_ops.
>
>> +int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
>> +{
>> +	struct drm_gem_shmem_object *shmem;
>> +	int ret;
>> +
>> +	ret = drm_gem_mmap(filp, vma);
>> +	if (ret)
>> +		return ret;
>> +
>> +	shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
>> +
>> +	ret = drm_gem_shmem_get_pages(shmem);
>> +	if (ret) {
>> +		drm_gem_vm_close(vma);
>> +		return ret;
>> +	}
>> +
>> +	/* VM_PFNMAP was set by drm_gem_mmap() */
>> +	vma->vm_flags &= ~VM_PFNMAP;
>> +	vma->vm_flags |= VM_MIXEDMAP;
>> +	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
>> +
>> +	fput(vma->vm_file);
>> +	vma->vm_file = get_file(shmem->base.filp);
>> +	/* Remove the fake offset */
>> +	vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
>> +
>> +	return 0;
>> +}
>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
> Looking through your mmap code there's 2 bits:
> - shmem isn't coherent,

You saved me in the last minute with that comment Daniel.

I just remembered that on the Raspberry Pi I need to swap the bytes 
(RGB565,
SPI big endian) before transfer because its SPI can't do 16-bit transfers.
So I removed the swapping and sent the shmem buffer straight through to SPI.
This resulted in fbcon looking erratic and "distorted", so it didn't work
with the generic fbdev emulation shadow buffer being copied by the cpu to
the shmem buffer which is DMA'ed out over SPI. Even though the SPI core is
using the streaming DMA API.
(apparently I've worked so much on DRM helper refactoring that I've
forgotten how the tinydrm drivers work...)

So it looks like I can't use shmem for the current tinydrm drivers, which
all use SPI. Unless there's a solution to my problem, I guess I'll have
to postpone this shmem work until I can use it in a driver that I'm 
planning
to do further down the road. It will compress the framebuffer before
transfer over USB so no DMA on the shmem buffer.

Do you want me to apply the first 3 patches anyway, even though the shmem
stuff has to wait?

> if you try to actually buffer share this with
>    prime I expect it'll fail badly. We'd need begin/end_cpu_access
>    callbacks in dma_buf_ops to make this work. So not entirely sure how
>    much value there is in implementing the prime mmap stuff (but doesn't
>    hurt to have it).

The generic fbdev emulation uses PRIME mmap.

>
> - shmem already has an mmap implemtation, you can just use that. Needs a
>    similar mmap forwarding trick as you've done for prime mmap, but instead
>    you forward to obj->base.filp. That allows you to cut away all the
>    vm_ops and everything. I guess we could clean this up in a follow-up,
>    since it doesn't have an effect on the interfaces exposed to drivers. At
>    least not a big one, drm_gem_shmem_vm_ops would disappear.

This approach using the shmem mmap seemed to work, not sure if the vm_flags
needs to be touched though.
(I'm putting it up here for the record, increases the chance of me finding
it when I pick up the shmem helper further down the road)

/*
 * Proposed mmap handler that forwards to shmem's own mmap implementation
 * instead of installing custom vm_ops (per Daniel Vetter's review
 * suggestion). Posted for the record, not merged in this form.
 */
int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct drm_gem_object *obj;
    int ret;

    /* Standard GEM mmap: looks up the object from the fake offset,
     * takes a reference on it and sets vma->vm_private_data. */
    ret = drm_gem_mmap(filp, vma);
    if (ret)
        return ret;

    obj = vma->vm_private_data;

    /* Remove the fake offset */
    vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);

    /* VM_PFNMAP was set by drm_gem_mmap() */
    /* NOTE(review): author notes above it's unclear whether these flag
     * tweaks are still needed once shmem's fault handler is used —
     * TODO confirm against shmem_mmap() requirements. */
    vma->vm_flags &= ~VM_PFNMAP;
    vma->vm_flags |= VM_MIXEDMAP;
    vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));

    /* Swap the DRM device file for the GEM object's shmem file so the
     * shmem mmap path operates on the right address_space. */
    fput(vma->vm_file);
    vma->vm_file = get_file(obj->filp);

    /* NOTE(review): drops the reference drm_gem_mmap() took; presumably
     * safe only if shmem's vm_ops fully replace the GEM ones below —
     * verify there is no vm_ops->open/close left expecting the ref. */
    drm_gem_object_put_unlocked(obj);

    /* Delegate to shmem's own mmap (installs shmem's vm_ops). */
    return call_mmap(vma->vm_file, vma);
}


Noralf.


>> +
>> +/**
>> + * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
>> + * @p: DRM printer
>> + * @indent: Tab indentation level
>> + * @obj: GEM object
> Would be good to reference the hook this is meant for:
> &drm_gem_object_funcs.print_info, same for all the others. For those
> drivers that want to pick&choose.
>
> Cheers, Daniel
>
>> + */
>> +void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
>> +			      const struct drm_gem_object *obj)
>> +{
>> +	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>> +
>> +	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
>> +	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
>> +	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
>> +}
>> +EXPORT_SYMBOL(drm_gem_shmem_print_info);
>> +
>> +/**
>> + * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
>> + *                              pages for a shmem GEM object
>> + * @obj: GEM object
>> + *
>> + * This function exports a scatter/gather table suitable for PRIME usage by
>> + * calling the standard DMA mapping API.
>> + *
>> + * Returns:
>> + * A pointer to the scatter/gather table of pinned pages or NULL on failure.
>> + */
>> +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
>> +{
>> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>> +
>> +	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
>> +}
>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
>> +
>> +/**
>> + * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
>> + *                 another driver's scatter/gather table of pinned pages
>> + * @dev: Device to import into
>> + * @attach: DMA-BUF attachment
>> + * @sgt: Scatter/gather table of pinned pages
>> + *
>> + * This function imports a scatter/gather table exported via DMA-BUF by
>> + * another driver. Drivers that use the shmem helpers should set this as their
>> + * &drm_driver.gem_prime_import_sg_table callback.
>> + *
>> + * Returns:
>> + * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
>> + * error code on failure.
>> + */
>> +struct drm_gem_object *
>> +drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
>> +				    struct dma_buf_attachment *attach,
>> +				    struct sg_table *sgt)
>> +{
>> +	size_t size = PAGE_ALIGN(attach->dmabuf->size);
>> +	size_t npages = size >> PAGE_SHIFT;
>> +	struct drm_gem_shmem_object *shmem;
>> +	int ret;
>> +
>> +	shmem = drm_gem_shmem_create(dev, size);
>> +	if (IS_ERR(shmem))
>> +		return ERR_CAST(shmem);
>> +
>> +	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
>> +	if (!shmem->pages) {
>> +		ret = -ENOMEM;
>> +		goto err_free_gem;
>> +	}
>> +
>> +	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
>> +	if (ret < 0)
>> +		goto err_free_array;
>> +
>> +	shmem->sgt = sgt;
>> +	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
>> +
>> +	DRM_DEBUG_PRIME("size = %zu\n", size);
>> +
>> +	return &shmem->base;
>> +
>> +err_free_array:
>> +	kvfree(shmem->pages);
>> +err_free_gem:
>> +	drm_gem_object_put_unlocked(&shmem->base);
>> +
>> +	return ERR_PTR(ret);
>> +}
>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
>> diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
>> new file mode 100644
>> index 000000000000..26b05e06407d
>> --- /dev/null
>> +++ b/include/drm/drm_gem_shmem_helper.h
>> @@ -0,0 +1,153 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +
>> +#ifndef __DRM_GEM_SHMEM_HELPER_H__
>> +#define __DRM_GEM_SHMEM_HELPER_H__
>> +
>> +#include <linux/fs.h>
>> +#include <linux/mm.h>
>> +#include <linux/mutex.h>
>> +
>> +#include <drm/drm_file.h>
>> +#include <drm/drm_gem.h>
>> +#include <drm/drm_ioctl.h>
>> +#include <drm/drm_prime.h>
>> +
>> +struct dma_buf_attachment;
>> +struct drm_mode_create_dumb;
>> +struct drm_printer;
>> +struct sg_table;
>> +
>> +/**
>> + * struct drm_gem_shmem_object - GEM object backed by shmem
>> + */
>> +struct drm_gem_shmem_object {
>> +	/**
>> +	 * @base: Base GEM object
>> +	 */
>> +	struct drm_gem_object base;
>> +
>> +	/**
>> +	 * @pages_lock: Protects the page table and use count
>> +	 */
>> +	struct mutex pages_lock;
>> +
>> +	/**
>> +	 * @pages: Page table
>> +	 */
>> +	struct page **pages;
>> +
>> +	/**
>> +	 * @pages_use_count:
>> +	 *
>> +	 * Reference count on the pages table.
>> +	 * The pages are put when the count reaches zero.
>> +	 */
>> +	unsigned int pages_use_count;
>> +
>> +	/**
>> +	 * @pages_mark_dirty_on_put:
>> +	 *
>> +	 * Mark pages as dirty when they are put.
>> +	 */
>> +	unsigned int pages_mark_dirty_on_put    : 1;
>> +
>> +	/**
>> +	 * @pages_mark_accessed_on_put:
>> +	 *
>> +	 * Mark pages as accessed when they are put.
>> +	 */
>> +	unsigned int pages_mark_accessed_on_put : 1;
>> +
>> +	/**
>> +	 * @sgt: Scatter/gather table for imported PRIME buffers
>> +	 */
>> +	struct sg_table *sgt;
>> +
>> +	/**
>> +	 * @vmap_lock: Protects the vmap address and use count
>> +	 */
>> +	struct mutex vmap_lock;
>> +
>> +	/**
>> +	 * @vaddr: Kernel virtual address of the backing memory
>> +	 */
>> +	void *vaddr;
>> +
>> +	/**
>> +	 * @vmap_use_count:
>> +	 *
>> +	 * Reference count on the virtual address.
>> +	 * The address are un-mapped when the count reaches zero.
>> +	 */
>> +	unsigned int vmap_use_count;
>> +};
>> +
>> +#define to_drm_gem_shmem_obj(obj) \
>> +	container_of(obj, struct drm_gem_shmem_object, base)
>> +
>> +/**
>> + * DEFINE_DRM_GEM_SHMEM_FOPS() - Macro to generate file operations for shmem drivers
>> + * @name: name for the generated structure
>> + *
>> + * This macro autogenerates a suitable &struct file_operations for shmem based
>> + * drivers, which can be assigned to &drm_driver.fops. Note that this structure
>> + * cannot be shared between drivers, because it contains a reference to the
>> + * current module using THIS_MODULE.
>> + *
>> + * Note that the declaration is already marked as static - if you need a
>> + * non-static version of this you're probably doing it wrong and will break the
>> + * THIS_MODULE reference by accident.
>> + */
>> +#define DEFINE_DRM_GEM_SHMEM_FOPS(name) \
>> +	static const struct file_operations name = {\
>> +		.owner		= THIS_MODULE,\
>> +		.open		= drm_open,\
>> +		.release	= drm_release,\
>> +		.unlocked_ioctl	= drm_ioctl,\
>> +		.compat_ioctl	= drm_compat_ioctl,\
>> +		.poll		= drm_poll,\
>> +		.read		= drm_read,\
>> +		.llseek		= noop_llseek,\
>> +		.mmap		= drm_gem_shmem_mmap, \
>> +	}
>> +
>> +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
>> +void drm_gem_shmem_free_object(struct drm_gem_object *obj);
>> +
>> +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
>> +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
>> +int drm_gem_shmem_pin(struct drm_gem_object *obj);
>> +void drm_gem_shmem_unpin(struct drm_gem_object *obj);
>> +void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
>> +void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
>> +
>> +int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
>> +			      struct drm_mode_create_dumb *args);
>> +
>> +int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma);
>> +
>> +extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
>> +
>> +void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
>> +			      const struct drm_gem_object *obj);
>> +
>> +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);
>> +struct drm_gem_object *
>> +drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
>> +				    struct dma_buf_attachment *attach,
>> +				    struct sg_table *sgt);
>> +
>> +/**
>> + * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
>> + *
>> + * This macro provides a shortcut for setting the shmem GEM operations in
>> + * the &drm_driver structure.
>> + */
>> +#define DRM_GEM_SHMEM_DRIVER_OPS \
>> +	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
>> +	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
>> +	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
>> +	.gem_prime_mmap		= drm_gem_prime_mmap, \
>> +	.dumb_create		= drm_gem_shmem_dumb_create
>> +
>> +#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
>> -- 
>> 2.15.1
>>
>> _______________________________________________
>> dri-devel mailing list
>> dri-devel@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/dri-devel
Daniel Vetter Oct. 23, 2018, 1:50 p.m. UTC | #3
On Mon, Oct 22, 2018 at 04:15:48PM +0200, Noralf Trønnes wrote:
> 
> Den 17.10.2018 17.46, skrev Daniel Vetter:
> > On Wed, Oct 17, 2018 at 03:04:53PM +0200, Noralf Trønnes wrote:
> > > This adds a library for shmem backed GEM objects.
> > > 
> > > v5:
> > > - Drop drm_gem_shmem_prime_mmap() (Daniel Vetter)
> > > - drm_gem_shmem_mmap(): Subtract drm_vma_node_start() to get the real
> > >    vma->vm_pgoff
> > > - drm_gem_shmem_fault(): Use vmf->pgoff now that vma->vm_pgoff is correct
> > > 
> > > v4:
> > > - Drop cache modes (Thomas Hellstrom)
> > > - Add a GEM attached vtable
> > > 
> > > v3:
> > > - Grammar (Sam Ravnborg)
> > > - s/drm_gem_shmem_put_pages_unlocked/drm_gem_shmem_put_pages_locked/
> > >    (Sam Ravnborg)
> > > - Add debug output in error path (Sam Ravnborg)
> > > 
> > > Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
> > > ---
> > >   Documentation/gpu/drm-kms-helpers.rst  |  12 +
> > >   drivers/gpu/drm/Kconfig                |   6 +
> > >   drivers/gpu/drm/Makefile               |   1 +
> > >   drivers/gpu/drm/drm_gem_shmem_helper.c | 551 +++++++++++++++++++++++++++++++++
> > >   include/drm/drm_gem_shmem_helper.h     | 153 +++++++++
> > >   5 files changed, 723 insertions(+)
> > >   create mode 100644 drivers/gpu/drm/drm_gem_shmem_helper.c
> > >   create mode 100644 include/drm/drm_gem_shmem_helper.h
> > > 
> 
> <snip>
> 
> > > +static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> > > +{
> > > +	struct vm_area_struct *vma = vmf->vma;
> > > +	struct drm_gem_object *obj = vma->vm_private_data;
> > > +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> > > +	loff_t num_pages = obj->size >> PAGE_SHIFT;
> > > +	struct page *page;
> > > +
> > > +	if (vmf->pgoff > num_pages || WARN_ON_ONCE(!shmem->pages))
> > > +		return VM_FAULT_SIGBUS;
> > > +
> > > +	page = shmem->pages[vmf->pgoff];
> > > +
> > > +	return vmf_insert_page(vma, vmf->address, page);
> > > +}
> > > +
> > > +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> > > +{
> > > +	struct drm_gem_object *obj = vma->vm_private_data;
> > > +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> > > +
> > > +	drm_gem_shmem_put_pages(shmem);
> > Imbalance get/put_pages here, if someone forks (which is what calls
> > vm_ops->open on an existing vma).
> > 
> > > +	drm_gem_vm_close(vma);
> > > +}
> > > +
> > > +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
> > > +	.fault = drm_gem_shmem_fault,
> > > +	.open = drm_gem_vm_open,
> > > +	.close = drm_gem_shmem_vm_close,
> > > +};
> > > +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
> > > +
> > > +/**
> > > + * drm_gem_shmem_mmap - Memory-map a shmem GEM object
> > > + * @filp: File object
> > > + * @vma: VMA for the area to be mapped
> > > + *
> > > + * This function implements an augmented version of the GEM DRM file mmap
> > > + * operation for shmem objects. Drivers which employ the shmem helpers should
> > > + * use this function as their &file_operations.mmap handler in the DRM device file's
> > > + * file_operations structure.
> > > + *
> > > + * Instead of directly referencing this function, drivers should use the
> > > + * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
> > > + *
> > > + * Returns:
> > > + * 0 on success or a negative error code on failure.
> > > + */
> > Between cma helpers, gem shmem helpers and the drm_gem_mmap we now have
> > quite a bit a confusion of mmap functions. I think it'd be good to update
> > the kerneldoc for drm_gem_mmap() to point at these here (both the shmem
> > and cma version), so that people prefer to use these helpers here. Direct
> > call to drm_gem_mmap would only be needed if you want to use your own
> > vm_ops.
> > 
> > > +int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
> > > +{
> > > +	struct drm_gem_shmem_object *shmem;
> > > +	int ret;
> > > +
> > > +	ret = drm_gem_mmap(filp, vma);
> > > +	if (ret)
> > > +		return ret;
> > > +
> > > +	shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
> > > +
> > > +	ret = drm_gem_shmem_get_pages(shmem);
> > > +	if (ret) {
> > > +		drm_gem_vm_close(vma);
> > > +		return ret;
> > > +	}
> > > +
> > > +	/* VM_PFNMAP was set by drm_gem_mmap() */
> > > +	vma->vm_flags &= ~VM_PFNMAP;
> > > +	vma->vm_flags |= VM_MIXEDMAP;
> > > +	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
> > > +
> > > +	fput(vma->vm_file);
> > > +	vma->vm_file = get_file(shmem->base.filp);
> > > +	/* Remove the fake offset */
> > > +	vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
> > > +
> > > +	return 0;
> > > +}
> > > +EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
> > Looking through your mmap code there's 2 bits:
> > - shmem isn't coherent,
> 
> You saved me in the last minute with that comment Daniel.
> 
> I just remembered that on the Raspberry Pi I need to swap the bytes (RGB565,
> SPI big endian) before transfer because its SPI can't do 16-bit transfers.
> So I removed the swapping and sent the shmem buffer straight through to SPI.
> This resulted in fbcon looking erratic and "distorted", so it didn't work
> with the generic fbdev emulation shadow buffer being copied by the cpu to
> the shmem buffer which is DMA'ed out over SPI. Even though the SPI core is
> using the streaming DMA API.
> (apparently I've worked so much on DRM helper refactoring that I've
> forgotten how the tinydrm drivers work...)
> 
> So it looks like I can't use shmem for the current tinydrm drivers, which
> all use SPI. Unless there's a solution to my problem, I guess I'll I have
> to postpone this shmem work until I can use it in a driver that I'm planning
> to do further down the road. It will compress the framebuffer before
> transfer over USB so no DMA on the shmem buffer.
> 
> Do you want me to apply the first 3 patches anyway, even though the shmem
> stuff has to wait?

Yeah I guess back to drawing board.

It looks like there's interests from others (Christian from AMD) on the
vtables stuff. So just pushing that, including a todo.rst entry to convert
drivers over, sounds like a good idea.

> 
> > if you try to actually buffer share this with
> >    prime I expect it'll fail badly. We'd need begin/end_cpu_access
> >    callbacks in dma_buf_ops to make this work. So not entirely sure how
> >    much value there is in implementing the prime mmap stuff (but doesn't
> >    hurt to have it).
> 
> The generic fbdev emulation uses PRIME mmap.

Oops. Works for CMA, doesn't work so well for shmem (assuming we have
non-coherent dma somewhere).

Maybe we can assume that all shmem using drivers will employ some kind of
manual upload (i.e. dirty), and then we could call the flush functions
from there?

> > - shmem already has an mmap implemtation, you can just use that. Needs a
> >    similar mmap forwarding trick as you've done for prime mmap, but instead
> >    you forward to obj->base.filp. That allows you to cut away all the
> >    vm_ops and everything. I guess we could clean this up in a follow-up,
> >    since it doesn't have an effect on the interfaces exposed to drivers. At
> >    least not a big one, drm_gem_shmem_vm_ops would disappear.
> 
> This approach using the shmem mmap seemed to work, not sure if the vm_flags
> needs to be touched though.
> (I'm putting it up here for the record, increases the chance of me finding
> it when I pick up the shmem helper further down the road)

vm_flags touching shouldn't be necessary anymore, you don't use
vm_insert_pfn. Which is what requires the VM_PFNMAP/MIXEDMAP stuff,
depending upon which vm_insert* function you're using. Afaiui at least,
not a core -mm expert at all.

But yes, this is what I had in mind.
-Daniel

> 
> int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
> {
>     struct drm_gem_object *obj;
>     int ret;
> 
>     ret = drm_gem_mmap(filp, vma);
>     if (ret)
>         return ret;
> 
>     obj = vma->vm_private_data;
> 
>     /* Remove the fake offset */
>     vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
> 
>     /* VM_PFNMAP was set by drm_gem_mmap() */
>     vma->vm_flags &= ~VM_PFNMAP;
>     vma->vm_flags |= VM_MIXEDMAP;
>     vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
> 
>     fput(vma->vm_file);
>     vma->vm_file = get_file(obj->filp);
> 
>     drm_gem_object_put_unlocked(obj);
> 
>     return call_mmap(vma->vm_file, vma);
> }
> 
> 
> Noralf.
> 
> 
> > > +
> > > +/**
> > > + * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
> > > + * @p: DRM printer
> > > + * @indent: Tab indentation level
> > > + * @obj: GEM object
> > Would be good to reference the hook this is meant for:
> > &drm_gem_object_funcs.print_info, same for all the others. For those
> > drivers that want to pick&choose.
> > 
> > Cheers, Daniel
> > 
> > > + */
> > > +void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
> > > +			      const struct drm_gem_object *obj)
> > > +{
> > > +	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> > > +
> > > +	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
> > > +	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
> > > +	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
> > > +}
> > > +EXPORT_SYMBOL(drm_gem_shmem_print_info);
> > > +
> > > +/**
> > > + * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
> > > + *                              pages for a shmem GEM object
> > > + * @obj: GEM object
> > > + *
> > > + * This function exports a scatter/gather table suitable for PRIME usage by
> > > + * calling the standard DMA mapping API.
> > > + *
> > > + * Returns:
> > > + * A pointer to the scatter/gather table of pinned pages or NULL on failure.
> > > + */
> > > +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
> > > +{
> > > +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> > > +
> > > +	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
> > > +}
> > > +EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
> > > +
> > > +/**
> > > + * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
> > > + *                 another driver's scatter/gather table of pinned pages
> > > + * @dev: Device to import into
> > > + * @attach: DMA-BUF attachment
> > > + * @sgt: Scatter/gather table of pinned pages
> > > + *
> > > + * This function imports a scatter/gather table exported via DMA-BUF by
> > > + * another driver. Drivers that use the shmem helpers should set this as their
> > > + * &drm_driver.gem_prime_import_sg_table callback.
> > > + *
> > > + * Returns:
> > > + * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
> > > + * error code on failure.
> > > + */
> > > +struct drm_gem_object *
> > > +drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
> > > +				    struct dma_buf_attachment *attach,
> > > +				    struct sg_table *sgt)
> > > +{
> > > +	size_t size = PAGE_ALIGN(attach->dmabuf->size);
> > > +	size_t npages = size >> PAGE_SHIFT;
> > > +	struct drm_gem_shmem_object *shmem;
> > > +	int ret;
> > > +
> > > +	shmem = drm_gem_shmem_create(dev, size);
> > > +	if (IS_ERR(shmem))
> > > +		return ERR_CAST(shmem);
> > > +
> > > +	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
> > > +	if (!shmem->pages) {
> > > +		ret = -ENOMEM;
> > > +		goto err_free_gem;
> > > +	}
> > > +
> > > +	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
> > > +	if (ret < 0)
> > > +		goto err_free_array;
> > > +
> > > +	shmem->sgt = sgt;
> > > +	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
> > > +
> > > +	DRM_DEBUG_PRIME("size = %zu\n", size);
> > > +
> > > +	return &shmem->base;
> > > +
> > > +err_free_array:
> > > +	kvfree(shmem->pages);
> > > +err_free_gem:
> > > +	drm_gem_object_put_unlocked(&shmem->base);
> > > +
> > > +	return ERR_PTR(ret);
> > > +}
> > > +EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
> > > diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
> > > new file mode 100644
> > > index 000000000000..26b05e06407d
> > > --- /dev/null
> > > +++ b/include/drm/drm_gem_shmem_helper.h
> > > @@ -0,0 +1,153 @@
> > > +/* SPDX-License-Identifier: GPL-2.0 */
> > > +
> > > +#ifndef __DRM_GEM_SHMEM_HELPER_H__
> > > +#define __DRM_GEM_SHMEM_HELPER_H__
> > > +
> > > +#include <linux/fs.h>
> > > +#include <linux/mm.h>
> > > +#include <linux/mutex.h>
> > > +
> > > +#include <drm/drm_file.h>
> > > +#include <drm/drm_gem.h>
> > > +#include <drm/drm_ioctl.h>
> > > +#include <drm/drm_prime.h>
> > > +
> > > +struct dma_buf_attachment;
> > > +struct drm_mode_create_dumb;
> > > +struct drm_printer;
> > > +struct sg_table;
> > > +
> > > +/**
> > > + * struct drm_gem_shmem_object - GEM object backed by shmem
> > > + */
> > > +struct drm_gem_shmem_object {
> > > +	/**
> > > +	 * @base: Base GEM object
> > > +	 */
> > > +	struct drm_gem_object base;
> > > +
> > > +	/**
> > > +	 * @pages_lock: Protects the page table and use count
> > > +	 */
> > > +	struct mutex pages_lock;
> > > +
> > > +	/**
> > > +	 * @pages: Page table
> > > +	 */
> > > +	struct page **pages;
> > > +
> > > +	/**
> > > +	 * @pages_use_count:
> > > +	 *
> > > +	 * Reference count on the pages table.
> > > +	 * The pages are put when the count reaches zero.
> > > +	 */
> > > +	unsigned int pages_use_count;
> > > +
> > > +	/**
> > > +	 * @pages_mark_dirty_on_put:
> > > +	 *
> > > +	 * Mark pages as dirty when they are put.
> > > +	 */
> > > +	unsigned int pages_mark_dirty_on_put    : 1;
> > > +
> > > +	/**
> > > +	 * @pages_mark_accessed_on_put:
> > > +	 *
> > > +	 * Mark pages as accessed when they are put.
> > > +	 */
> > > +	unsigned int pages_mark_accessed_on_put : 1;
> > > +
> > > +	/**
> > > +	 * @sgt: Scatter/gather table for imported PRIME buffers
> > > +	 */
> > > +	struct sg_table *sgt;
> > > +
> > > +	/**
> > > +	 * @vmap_lock: Protects the vmap address and use count
> > > +	 */
> > > +	struct mutex vmap_lock;
> > > +
> > > +	/**
> > > +	 * @vaddr: Kernel virtual address of the backing memory
> > > +	 */
> > > +	void *vaddr;
> > > +
> > > +	/**
> > > +	 * @vmap_use_count:
> > > +	 *
> > > +	 * Reference count on the virtual address.
> > > +	 * The address are un-mapped when the count reaches zero.
> > > +	 */
> > > +	unsigned int vmap_use_count;
> > > +};
> > > +
> > > +#define to_drm_gem_shmem_obj(obj) \
> > > +	container_of(obj, struct drm_gem_shmem_object, base)
> > > +
> > > +/**
> > > + * DEFINE_DRM_GEM_SHMEM_FOPS() - Macro to generate file operations for shmem drivers
> > > + * @name: name for the generated structure
> > > + *
> > > + * This macro autogenerates a suitable &struct file_operations for shmem based
> > > + * drivers, which can be assigned to &drm_driver.fops. Note that this structure
> > > + * cannot be shared between drivers, because it contains a reference to the
> > > + * current module using THIS_MODULE.
> > > + *
> > > + * Note that the declaration is already marked as static - if you need a
> > > + * non-static version of this you're probably doing it wrong and will break the
> > > + * THIS_MODULE reference by accident.
> > > + */
> > > +#define DEFINE_DRM_GEM_SHMEM_FOPS(name) \
> > > +	static const struct file_operations name = {\
> > > +		.owner		= THIS_MODULE,\
> > > +		.open		= drm_open,\
> > > +		.release	= drm_release,\
> > > +		.unlocked_ioctl	= drm_ioctl,\
> > > +		.compat_ioctl	= drm_compat_ioctl,\
> > > +		.poll		= drm_poll,\
> > > +		.read		= drm_read,\
> > > +		.llseek		= noop_llseek,\
> > > +		.mmap		= drm_gem_shmem_mmap, \
> > > +	}
> > > +
> > > +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
> > > +void drm_gem_shmem_free_object(struct drm_gem_object *obj);
> > > +
> > > +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
> > > +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
> > > +int drm_gem_shmem_pin(struct drm_gem_object *obj);
> > > +void drm_gem_shmem_unpin(struct drm_gem_object *obj);
> > > +void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
> > > +void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
> > > +
> > > +int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
> > > +			      struct drm_mode_create_dumb *args);
> > > +
> > > +int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma);
> > > +
> > > +extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
> > > +
> > > +void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
> > > +			      const struct drm_gem_object *obj);
> > > +
> > > +struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);
> > > +struct drm_gem_object *
> > > +drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
> > > +				    struct dma_buf_attachment *attach,
> > > +				    struct sg_table *sgt);
> > > +
> > > +/**
> > > + * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
> > > + *
> > > + * This macro provides a shortcut for setting the shmem GEM operations in
> > > + * the &drm_driver structure.
> > > + */
> > > +#define DRM_GEM_SHMEM_DRIVER_OPS \
> > > +	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
> > > +	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
> > > +	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
> > > +	.gem_prime_mmap		= drm_gem_prime_mmap, \
> > > +	.dumb_create		= drm_gem_shmem_dumb_create
> > > +
> > > +#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
> > > -- 
> > > 2.15.1
> > > 
> > > _______________________________________________
> > > dri-devel mailing list
> > > dri-devel@lists.freedesktop.org
> > > https://lists.freedesktop.org/mailman/listinfo/dri-devel
>
Eric Anholt Nov. 27, 2018, 12:36 a.m. UTC | #4
Noralf Trønnes <noralf@tronnes.org> writes:
> +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> +{
> +	struct drm_gem_object *obj = vma->vm_private_data;
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +	drm_gem_shmem_put_pages(shmem);
> +	drm_gem_vm_close(vma);
> +}
> +
> +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
> +	.fault = drm_gem_shmem_fault,
> +	.open = drm_gem_vm_open,
> +	.close = drm_gem_shmem_vm_close,
> +};
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);

I just saw a warning from drm_gem_shmem_put_pages() for
!shmem->pages_use_count -- I think drm_gem_vm_open() needs to
drm_gem_shmem_get_pages().
Daniel Vetter Nov. 27, 2018, 8:58 a.m. UTC | #5
On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
> Noralf Trønnes <noralf@tronnes.org> writes:
> > +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> > +{
> > +	struct drm_gem_object *obj = vma->vm_private_data;
> > +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> > +
> > +	drm_gem_shmem_put_pages(shmem);
> > +	drm_gem_vm_close(vma);
> > +}
> > +
> > +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
> > +	.fault = drm_gem_shmem_fault,
> > +	.open = drm_gem_vm_open,
> > +	.close = drm_gem_shmem_vm_close,
> > +};
> > +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
> 
> I just saw a warning from drm_gem_shmem_put_pages() for
> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
> drm_gem_shmem_get_pages().

Yeah we need a drm_gem_shmem_vm_open here.
-Daniel
Eric Anholt Nov. 27, 2018, 8:38 p.m. UTC | #6
Daniel Vetter <daniel@ffwll.ch> writes:

> On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
>> Noralf Trønnes <noralf@tronnes.org> writes:
>> > +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
>> > +{
>> > +	struct drm_gem_object *obj = vma->vm_private_data;
>> > +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>> > +
>> > +	drm_gem_shmem_put_pages(shmem);
>> > +	drm_gem_vm_close(vma);
>> > +}
>> > +
>> > +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
>> > +	.fault = drm_gem_shmem_fault,
>> > +	.open = drm_gem_vm_open,
>> > +	.close = drm_gem_shmem_vm_close,
>> > +};
>> > +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
>> 
>> I just saw a warning from drm_gem_shmem_put_pages() for
>> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
>> drm_gem_shmem_get_pages().
>
> Yeah we need a drm_gem_shmem_vm_open here.

Adding one of those fixed my refcounting issues, so I've sent out a v6
with it.
Daniel Vetter Nov. 28, 2018, 8:22 a.m. UTC | #7
On Tue, Nov 27, 2018 at 12:38:44PM -0800, Eric Anholt wrote:
> Daniel Vetter <daniel@ffwll.ch> writes:
> 
> > On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
> >> Noralf Trønnes <noralf@tronnes.org> writes:
> >> > +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> >> > +{
> >> > +	struct drm_gem_object *obj = vma->vm_private_data;
> >> > +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> >> > +
> >> > +	drm_gem_shmem_put_pages(shmem);
> >> > +	drm_gem_vm_close(vma);
> >> > +}
> >> > +
> >> > +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
> >> > +	.fault = drm_gem_shmem_fault,
> >> > +	.open = drm_gem_vm_open,
> >> > +	.close = drm_gem_shmem_vm_close,
> >> > +};
> >> > +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
> >> 
> >> I just saw a warning from drm_gem_shmem_put_pages() for
> >> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
> >> drm_gem_shmem_get_pages().
> >
> > Yeah we need a drm_gem_shmem_vm_open here.
> 
> Adding one of those fixed my refcounting issues, so I've sent out a v6
> with it.

Just realized that I've reviewed this patch already, spotted that vma
management issue there too. Plus a pile of other things. From reading that
other thread discussion with Noralf concluded with "not yet ready for
prime time" unfortunately :-/
-Daniel
Eric Anholt Nov. 28, 2018, 9:52 p.m. UTC | #8
Daniel Vetter <daniel@ffwll.ch> writes:

> On Tue, Nov 27, 2018 at 12:38:44PM -0800, Eric Anholt wrote:
>> Daniel Vetter <daniel@ffwll.ch> writes:
>> 
>> > On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
>> >> Noralf Trønnes <noralf@tronnes.org> writes:
>> >> > +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
>> >> > +{
>> >> > +	struct drm_gem_object *obj = vma->vm_private_data;
>> >> > +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>> >> > +
>> >> > +	drm_gem_shmem_put_pages(shmem);
>> >> > +	drm_gem_vm_close(vma);
>> >> > +}
>> >> > +
>> >> > +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
>> >> > +	.fault = drm_gem_shmem_fault,
>> >> > +	.open = drm_gem_vm_open,
>> >> > +	.close = drm_gem_shmem_vm_close,
>> >> > +};
>> >> > +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
>> >> 
>> >> I just saw a warning from drm_gem_shmem_put_pages() for
>> >> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
>> >> drm_gem_shmem_get_pages().
>> >
>> > Yeah we need a drm_gem_shmem_vm_open here.
>> 
>> Adding one of those fixed my refcounting issues, so I've sent out a v6
>> with it.
>
> Just realized that I've reviewed this patch already, spotted that vma
> management issue there too. Plus a pile of other things. From reading that
> other thread discussion with Noralf concluded with "not yet ready for
> prime time" unfortunately :-/

I saw stuff about how it wasn't usable for SPI because SPI does weird
things with DMA mapping.  Was there something else?
Daniel Vetter Nov. 29, 2018, 9:17 a.m. UTC | #9
On Wed, Nov 28, 2018 at 01:52:56PM -0800, Eric Anholt wrote:
> Daniel Vetter <daniel@ffwll.ch> writes:
> 
> > On Tue, Nov 27, 2018 at 12:38:44PM -0800, Eric Anholt wrote:
> >> Daniel Vetter <daniel@ffwll.ch> writes:
> >> 
> >> > On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
> >> >> Noralf Trønnes <noralf@tronnes.org> writes:
> >> >> > +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> >> >> > +{
> >> >> > +	struct drm_gem_object *obj = vma->vm_private_data;
> >> >> > +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> >> >> > +
> >> >> > +	drm_gem_shmem_put_pages(shmem);
> >> >> > +	drm_gem_vm_close(vma);
> >> >> > +}
> >> >> > +
> >> >> > +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
> >> >> > +	.fault = drm_gem_shmem_fault,
> >> >> > +	.open = drm_gem_vm_open,
> >> >> > +	.close = drm_gem_shmem_vm_close,
> >> >> > +};
> >> >> > +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
> >> >> 
> >> >> I just saw a warning from drm_gem_shmem_put_pages() for
> >> >> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
> >> >> drm_gem_shmem_get_pages().
> >> >
> >> > Yeah we need a drm_gem_shmem_vm_open here.
> >> 
> >> Adding one of those fixed my refcounting issues, so I've sent out a v6
> >> with it.
> >
> > Just realized that I've reviewed this patch already, spotted that vma
> > management issue there too. Plus a pile of other things. From reading that
> > other thread discussion with Noralf concluded with "not yet ready for
> > prime time" unfortunately :-/
> 
> I saw stuff about how it wasn't usable for SPI because SPI does weird
> things with DMA mapping.  Was there something else?

Looking through that mail it was a bunch of comments to improve the
kerneldoc. Plus a note that buffer sharing/mmap is going to be all
incoherent and horrible (but I guess for vkms we don't care that much).
I'm just kinda wary of generic buffer handling that turns out to not be
actually all that useful. We have lots of deadends and kinda-midlayers in
this area already (but this one here definitely smells plenty better than
lots of older ones).
-Daniel
Eric Anholt Nov. 29, 2018, 11:58 p.m. UTC | #10
Daniel Vetter <daniel@ffwll.ch> writes:

> On Wed, Nov 28, 2018 at 01:52:56PM -0800, Eric Anholt wrote:
>> Daniel Vetter <daniel@ffwll.ch> writes:
>> 
>> > On Tue, Nov 27, 2018 at 12:38:44PM -0800, Eric Anholt wrote:
>> >> Daniel Vetter <daniel@ffwll.ch> writes:
>> >> 
>> >> > On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
>> >> >> Noralf Trønnes <noralf@tronnes.org> writes:
>> >> >> > +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
>> >> >> > +{
>> >> >> > +	struct drm_gem_object *obj = vma->vm_private_data;
>> >> >> > +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>> >> >> > +
>> >> >> > +	drm_gem_shmem_put_pages(shmem);
>> >> >> > +	drm_gem_vm_close(vma);
>> >> >> > +}
>> >> >> > +
>> >> >> > +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
>> >> >> > +	.fault = drm_gem_shmem_fault,
>> >> >> > +	.open = drm_gem_vm_open,
>> >> >> > +	.close = drm_gem_shmem_vm_close,
>> >> >> > +};
>> >> >> > +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
>> >> >> 
>> >> >> I just saw a warning from drm_gem_shmem_put_pages() for
>> >> >> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
>> >> >> drm_gem_shmem_get_pages().
>> >> >
>> >> > Yeah we need a drm_gem_shmem_vm_open here.
>> >> 
>> >> Adding one of those fixed my refcounting issues, so I've sent out a v6
>> >> with it.
>> >
>> > Just realized that I've reviewed this patch already, spotted that vma
>> > management issue there too. Plus a pile of other things. From reading that
>> > other thread discussion with Noralf concluded with "not yet ready for
>> > prime time" unfortunately :-/
>> 
>> I saw stuff about how it wasn't usable for SPI because SPI does weird
>> things with DMA mapping.  Was there something else?
>
> Looking through that mail it was a bunch of comments to improve the
> kerneldoc. Plus a note that buffer sharing/mmap is going to be all
> incoherent and horrible (but I guess for vkms we don't care that much).
> I'm just kinda vary of generic buffer handling that turns out to not be
> actually all that useful. We have lots of deadends and kinda-midlayers in
> this area already (but this one here definitely smells plenty better than
> lots of older ones).

FWIW, I really want shmem helpers for v3d.  The fault handling in
particular has magic I don't understand, and this is not my first fault
handler. :/
Noralf Trønnes Dec. 2, 2018, 3:58 p.m. UTC | #11
Den 30.11.2018 00.58, skrev Eric Anholt:
> Daniel Vetter <daniel@ffwll.ch> writes:
>
>> On Wed, Nov 28, 2018 at 01:52:56PM -0800, Eric Anholt wrote:
>>> Daniel Vetter <daniel@ffwll.ch> writes:
>>>
>>>> On Tue, Nov 27, 2018 at 12:38:44PM -0800, Eric Anholt wrote:
>>>>> Daniel Vetter <daniel@ffwll.ch> writes:
>>>>>
>>>>>> On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
>>>>>>> Noralf Trønnes <noralf@tronnes.org> writes:
>>>>>>>> +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
>>>>>>>> +{
>>>>>>>> +	struct drm_gem_object *obj = vma->vm_private_data;
>>>>>>>> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>>>>>>>> +
>>>>>>>> +	drm_gem_shmem_put_pages(shmem);
>>>>>>>> +	drm_gem_vm_close(vma);
>>>>>>>> +}
>>>>>>>> +
>>>>>>>> +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
>>>>>>>> +	.fault = drm_gem_shmem_fault,
>>>>>>>> +	.open = drm_gem_vm_open,
>>>>>>>> +	.close = drm_gem_shmem_vm_close,
>>>>>>>> +};
>>>>>>>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
>>>>>>> I just saw a warning from drm_gem_shmem_put_pages() for
>>>>>>> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
>>>>>>> drm_gem_shmem_get_pages().
>>>>>> Yeah we need a drm_gem_shmem_vm_open here.
>>>>> Adding one of those fixed my refcounting issues, so I've sent out a v6
>>>>> with it.
>>>> Just realized that I've reviewed this patch already, spotted that vma
>>>> management issue there too. Plus a pile of other things. From reading that
>>>> other thread discussion with Noralf concluded with "not yet ready for
>>>> prime time" unfortunately :-/
>>> I saw stuff about how it wasn't usable for SPI because SPI does weird
>>> things with DMA mapping.  Was there something else?
>> Looking through that mail it was a bunch of comments to improve the
>> kerneldoc. Plus a note that buffer sharing/mmap is going to be all
>> incoherent and horrible (but I guess for vkms we don't care that much).
>> I'm just kinda vary of generic buffer handling that turns out to not be
>> actually all that useful. We have lots of deadends and kinda-midlayers in
>> this area already (but this one here definitely smells plenty better than
>> lots of older ones).
> FWIW, I really want shmem helpers for v3d.  The fault handling in
> particular has magic I don't understand, and this is not my first fault
> handler. :/


If you can use it for a "real" hw driver like v3d, I think it makes a lot of
sense to have it as a helper. I believe udl and a future simpledrm can
also make use of it.

I have an idea about a usb driver that I hope to work on somewhere down
the line that will need this kind of code. So my plan was to resurrect
this code when I got there.

I agree that fault handling looks a bit like magic. I looked at all the
drivers that use shmem buffers to see what they were doing. When Daniel
put me on the right track with the fake offsets, the fault handler ended
up being quite small.

Having a helper like this that can actually be used for real hw drivers
(if it can, that is), increases the chance of getting this -mm stuff
right. And hopefully someone down the line having domain knowledge can
audit this code. It's less likely that this will happen with code tucked
away in a driver, especially the smaller ones.

Initially I hoped that I could make the helper compatible with vgem, so I
could convert vgem to use this helper. That would give the helper a lot
of testing, making it and keeping it solid. However vgem can get pages
one by one in the fault handler if all pages haven't been fetched. I
didn't see an easy way to handle that together with page use counting.
The main reason for having page counting was to make it easy to bolt on a
shrinker, but I have no experience nor any knowledge about that, so I
don't know if it can be easily done.

Noralf.
Rob Herring Jan. 28, 2019, 8:57 p.m. UTC | #12
On Sun, Dec 2, 2018 at 9:59 AM Noralf Trønnes <noralf@tronnes.org> wrote:
>
>
> Den 30.11.2018 00.58, skrev Eric Anholt:
> > Daniel Vetter <daniel@ffwll.ch> writes:
> >
> >> On Wed, Nov 28, 2018 at 01:52:56PM -0800, Eric Anholt wrote:
> >>> Daniel Vetter <daniel@ffwll.ch> writes:
> >>>
> >>>> On Tue, Nov 27, 2018 at 12:38:44PM -0800, Eric Anholt wrote:
> >>>>> Daniel Vetter <daniel@ffwll.ch> writes:
> >>>>>
> >>>>>> On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
> >>>>>>> Noralf Trønnes <noralf@tronnes.org> writes:
> >>>>>>>> +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> >>>>>>>> +{
> >>>>>>>> +      struct drm_gem_object *obj = vma->vm_private_data;
> >>>>>>>> +      struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> >>>>>>>> +
> >>>>>>>> +      drm_gem_shmem_put_pages(shmem);
> >>>>>>>> +      drm_gem_vm_close(vma);
> >>>>>>>> +}
> >>>>>>>> +
> >>>>>>>> +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
> >>>>>>>> +      .fault = drm_gem_shmem_fault,
> >>>>>>>> +      .open = drm_gem_vm_open,
> >>>>>>>> +      .close = drm_gem_shmem_vm_close,
> >>>>>>>> +};
> >>>>>>>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
> >>>>>>> I just saw a warning from drm_gem_shmem_put_pages() for
> >>>>>>> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
> >>>>>>> drm_gem_shmem_get_pages().
> >>>>>> Yeah we need a drm_gem_shmem_vm_open here.
> >>>>> Adding one of those fixed my refcounting issues, so I've sent out a v6
> >>>>> with it.
> >>>> Just realized that I've reviewed this patch already, spotted that vma
> >>>> management issue there too. Plus a pile of other things. From reading that
> >>>> other thread discussion with Noralf concluded with "not yet ready for
> >>>> prime time" unfortunately :-/
> >>> I saw stuff about how it wasn't usable for SPI because SPI does weird
> >>> things with DMA mapping.  Was there something else?
> >> Looking through that mail it was a bunch of comments to improve the
> >> kerneldoc. Plus a note that buffer sharing/mmap is going to be all
> >> incoherent and horrible (but I guess for vkms we don't care that much).
> >> I'm just kinda vary of generic buffer handling that turns out to not be
> >> actually all that useful. We have lots of deadends and kinda-midlayers in
> >> this area already (but this one here definitely smells plenty better than
> >> lots of older ones).
> > FWIW, I really want shmem helpers for v3d.  The fault handling in
> > particular has magic I don't understand, and this is not my first fault
> > handler. :/
>
>
> If you can use it for a "real" hw driver like v3d, I think it makes a lot
> sense to have it as a helper. I believe udl and a future simpledrm can
> also make use of it.

FWIW, I think etnaviv at least could use this too.

I'm starting to look at panfrost and lima drivers and was trying to
figure out where to start with the GEM code. So I've been comparing
etnaviv, freedreno, and vgem implementations. They are all pretty
similar from what I see. The per driver GEM obj structs certainly are.
I can't bring myself to just copy etnaviv code over and do a
s/etnaviv/panfrost/. So searching around a bit, I ended up on this
thread. This seems to be what I need for panfrost (based on my brief
study).

Rob
Noralf Trønnes Jan. 28, 2019, 9:22 p.m. UTC | #13
Den 28.01.2019 21.57, skrev Rob Herring:
> On Sun, Dec 2, 2018 at 9:59 AM Noralf Trønnes <noralf@tronnes.org> wrote:
>>
>>
>> Den 30.11.2018 00.58, skrev Eric Anholt:
>>> Daniel Vetter <daniel@ffwll.ch> writes:
>>>
>>>> On Wed, Nov 28, 2018 at 01:52:56PM -0800, Eric Anholt wrote:
>>>>> Daniel Vetter <daniel@ffwll.ch> writes:
>>>>>
>>>>>> On Tue, Nov 27, 2018 at 12:38:44PM -0800, Eric Anholt wrote:
>>>>>>> Daniel Vetter <daniel@ffwll.ch> writes:
>>>>>>>
>>>>>>>> On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
>>>>>>>>> Noralf Trønnes <noralf@tronnes.org> writes:
>>>>>>>>>> +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
>>>>>>>>>> +{
>>>>>>>>>> +      struct drm_gem_object *obj = vma->vm_private_data;
>>>>>>>>>> +      struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>>>>>>>>>> +
>>>>>>>>>> +      drm_gem_shmem_put_pages(shmem);
>>>>>>>>>> +      drm_gem_vm_close(vma);
>>>>>>>>>> +}
>>>>>>>>>> +
>>>>>>>>>> +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
>>>>>>>>>> +      .fault = drm_gem_shmem_fault,
>>>>>>>>>> +      .open = drm_gem_vm_open,
>>>>>>>>>> +      .close = drm_gem_shmem_vm_close,
>>>>>>>>>> +};
>>>>>>>>>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
>>>>>>>>> I just saw a warning from drm_gem_shmem_put_pages() for
>>>>>>>>> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
>>>>>>>>> drm_gem_shmem_get_pages().
>>>>>>>> Yeah we need a drm_gem_shmem_vm_open here.
>>>>>>> Adding one of those fixed my refcounting issues, so I've sent out a v6
>>>>>>> with it.
>>>>>> Just realized that I've reviewed this patch already, spotted that vma
>>>>>> management issue there too. Plus a pile of other things. From reading that
>>>>>> other thread discussion with Noralf concluded with "not yet ready for
>>>>>> prime time" unfortunately :-/
>>>>> I saw stuff about how it wasn't usable for SPI because SPI does weird
>>>>> things with DMA mapping.  Was there something else?
>>>> Looking through that mail it was a bunch of comments to improve the
>>>> kerneldoc. Plus a note that buffer sharing/mmap is going to be all
>>>> incoherent and horrible (but I guess for vkms we don't care that much).
>>>> I'm just kinda vary of generic buffer handling that turns out to not be
>>>> actually all that useful. We have lots of deadends and kinda-midlayers in
>>>> this area already (but this one here definitely smells plenty better than
>>>> lots of older ones).
>>> FWIW, I really want shmem helpers for v3d.  The fault handling in
>>> particular has magic I don't understand, and this is not my first fault
>>> handler. :/
>>
>>
>> If you can use it for a "real" hw driver like v3d, I think it makes a lot
>> sense to have it as a helper. I believe udl and a future simpledrm can
>> also make use of it.
> 
> FWIW, I think etnaviv at least could use this too.
> 
> I'm starting to look at panfrost and lima drivers and was trying to
> figure out where to start with the GEM code. So I've been comparing
> etnaviv, freedreno, and vgem implementations. They are all pretty
> similar from what I see. The per driver GEM obj structs certainly are.
> I can't bring myself to just copy etnaviv code over and do a
> s/etnaviv/panfrost/. So searching around a bit, I ended up on this
> thread. This seems to be what I need for panfrost (based on my brief
> study).
> 

I gave up on this due to problems with SPI DMA.
Eric tried to use it with vkms, but it failed. On his blog he speculates
that it might be due to cached CPU mappings:
https://anholt.github.io/twivc4/2018/12/03/twiv/

For tinydrm I wanted cached mappings, but it might not work that well
with shmem. Maybe that's why I had problems with SPI DMA.

Maybe this change to drm_gem_shmem_mmap() is all it takes:

-	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

The memory subsystem is really complicated and I have kind of given up
on trying to decipher it.

Noralf.
Rob Herring Jan. 28, 2019, 10:01 p.m. UTC | #14
On Mon, Jan 28, 2019 at 3:26 PM Noralf Trønnes <noralf@tronnes.org> wrote:
>
>
>
> Den 28.01.2019 21.57, skrev Rob Herring:
> > On Sun, Dec 2, 2018 at 9:59 AM Noralf Trønnes <noralf@tronnes.org> wrote:
> >>
> >>
> >> Den 30.11.2018 00.58, skrev Eric Anholt:
> >>> Daniel Vetter <daniel@ffwll.ch> writes:
> >>>
> >>>> On Wed, Nov 28, 2018 at 01:52:56PM -0800, Eric Anholt wrote:
> >>>>> Daniel Vetter <daniel@ffwll.ch> writes:
> >>>>>
> >>>>>> On Tue, Nov 27, 2018 at 12:38:44PM -0800, Eric Anholt wrote:
> >>>>>>> Daniel Vetter <daniel@ffwll.ch> writes:
> >>>>>>>
> >>>>>>>> On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
> >>>>>>>>> Noralf Trønnes <noralf@tronnes.org> writes:
> >>>>>>>>>> +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> >>>>>>>>>> +{
> >>>>>>>>>> +      struct drm_gem_object *obj = vma->vm_private_data;
> >>>>>>>>>> +      struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> >>>>>>>>>> +
> >>>>>>>>>> +      drm_gem_shmem_put_pages(shmem);
> >>>>>>>>>> +      drm_gem_vm_close(vma);
> >>>>>>>>>> +}
> >>>>>>>>>> +
> >>>>>>>>>> +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
> >>>>>>>>>> +      .fault = drm_gem_shmem_fault,
> >>>>>>>>>> +      .open = drm_gem_vm_open,
> >>>>>>>>>> +      .close = drm_gem_shmem_vm_close,
> >>>>>>>>>> +};
> >>>>>>>>>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
> >>>>>>>>> I just saw a warning from drm_gem_shmem_put_pages() for
> >>>>>>>>> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
> >>>>>>>>> drm_gem_shmem_get_pages().
> >>>>>>>> Yeah we need a drm_gem_shmem_vm_open here.
> >>>>>>> Adding one of those fixed my refcounting issues, so I've sent out a v6
> >>>>>>> with it.
> >>>>>> Just realized that I've reviewed this patch already, spotted that vma
> >>>>>> management issue there too. Plus a pile of other things. From reading that
> >>>>>> other thread discussion with Noralf concluded with "not yet ready for
> >>>>>> prime time" unfortunately :-/
> >>>>> I saw stuff about how it wasn't usable for SPI because SPI does weird
> >>>>> things with DMA mapping.  Was there something else?
> >>>> Looking through that mail it was a bunch of comments to improve the
> >>>> kerneldoc. Plus a note that buffer sharing/mmap is going to be all
> >>>> incoherent and horrible (but I guess for vkms we don't care that much).
> >>>> I'm just kinda vary of generic buffer handling that turns out to not be
> >>>> actually all that useful. We have lots of deadends and kinda-midlayers in
> >>>> this area already (but this one here definitely smells plenty better than
> >>>> lots of older ones).
> >>> FWIW, I really want shmem helpers for v3d.  The fault handling in
> >>> particular has magic I don't understand, and this is not my first fault
> >>> handler. :/
> >>
> >>
> >> If you can use it for a "real" hw driver like v3d, I think it makes a lot
> >> sense to have it as a helper. I believe udl and a future simpledrm can
> >> also make use of it.
> >
> > FWIW, I think etnaviv at least could use this too.
> >
> > I'm starting to look at panfrost and lima drivers and was trying to
> > figure out where to start with the GEM code. So I've been comparing
> > etnaviv, freedreno, and vgem implementations. They are all pretty
> > similar from what I see. The per driver GEM obj structs certainly are.
> > I can't bring myself to just copy etnaviv code over and do a
> > s/etnaviv/panfrost/. So searching around a bit, I ended up on this
> > thread. This seems to be what I need for panfrost (based on my brief
> > study).
> >
>
> I gave up on this due to problems with SPI DMA.
> Eric tried to use it with vkms, but it failed. On his blog he speculates
> that it might be due to cached CPU mappings:
> https://anholt.github.io/twivc4/2018/12/03/twiv/
>
> For tinydrm I wanted cached mappings, but it might not work that well
> with shmem. Maybe that's why I had problems with SPI DMA.

I think for most ARM systems, a cached mapping is not coherent. So
there will need to be cache flushes using the dma API. I see there's
been some discussion around that too.

> Maybe this change to drm_gem_shmem_mmap() is all it takes:
>
> -       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
> +       vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

Seems like at least this part needs some flexibility. etnaviv,
freedreno, and omap at least all appear to support cached, uncached,
and writecombined based on flags in the GEM object.

> The memory subsystem is really complicated and I have kind of given up
> on trying to decipher it.

Yes. All the more reason to not let each driver figure out what to do.

Rob
Eric Anholt Jan. 29, 2019, 12:19 a.m. UTC | #15
Noralf Trønnes <noralf@tronnes.org> writes:

> Den 28.01.2019 21.57, skrev Rob Herring:
>> On Sun, Dec 2, 2018 at 9:59 AM Noralf Trønnes <noralf@tronnes.org> wrote:
>>>
>>>
>>> Den 30.11.2018 00.58, skrev Eric Anholt:
>>>> Daniel Vetter <daniel@ffwll.ch> writes:
>>>>
>>>>> On Wed, Nov 28, 2018 at 01:52:56PM -0800, Eric Anholt wrote:
>>>>>> Daniel Vetter <daniel@ffwll.ch> writes:
>>>>>>
>>>>>>> On Tue, Nov 27, 2018 at 12:38:44PM -0800, Eric Anholt wrote:
>>>>>>>> Daniel Vetter <daniel@ffwll.ch> writes:
>>>>>>>>
>>>>>>>>> On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
>>>>>>>>>> Noralf Trønnes <noralf@tronnes.org> writes:
>>>>>>>>>>> +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
>>>>>>>>>>> +{
>>>>>>>>>>> +      struct drm_gem_object *obj = vma->vm_private_data;
>>>>>>>>>>> +      struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>>>>>>>>>>> +
>>>>>>>>>>> +      drm_gem_shmem_put_pages(shmem);
>>>>>>>>>>> +      drm_gem_vm_close(vma);
>>>>>>>>>>> +}
>>>>>>>>>>> +
>>>>>>>>>>> +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
>>>>>>>>>>> +      .fault = drm_gem_shmem_fault,
>>>>>>>>>>> +      .open = drm_gem_vm_open,
>>>>>>>>>>> +      .close = drm_gem_shmem_vm_close,
>>>>>>>>>>> +};
>>>>>>>>>>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
>>>>>>>>>> I just saw a warning from drm_gem_shmem_put_pages() for
>>>>>>>>>> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
>>>>>>>>>> drm_gem_shmem_get_pages().
>>>>>>>>> Yeah we need a drm_gem_shmem_vm_open here.
>>>>>>>> Adding one of those fixed my refcounting issues, so I've sent out a v6
>>>>>>>> with it.
>>>>>>> Just realized that I've reviewed this patch already, spotted that vma
>>>>>>> management issue there too. Plus a pile of other things. From reading that
>>>>>>> other thread discussion with Noralf concluded with "not yet ready for
>>>>>>> prime time" unfortunately :-/
>>>>>> I saw stuff about how it wasn't usable for SPI because SPI does weird
>>>>>> things with DMA mapping.  Was there something else?
>>>>> Looking through that mail it was a bunch of comments to improve the
>>>>> kerneldoc. Plus a note that buffer sharing/mmap is going to be all
>>>>> incoherent and horrible (but I guess for vkms we don't care that much).
>>>>> I'm just kinda vary of generic buffer handling that turns out to not be
>>>>> actually all that useful. We have lots of deadends and kinda-midlayers in
>>>>> this area already (but this one here definitely smells plenty better than
>>>>> lots of older ones).
>>>> FWIW, I really want shmem helpers for v3d.  The fault handling in
>>>> particular has magic I don't understand, and this is not my first fault
>>>> handler. :/
>>>
>>>
>>> If you can use it for a "real" hw driver like v3d, I think it makes a lot
>>> sense to have it as a helper. I believe udl and a future simpledrm can
>>> also make use of it.
>> 
>> FWIW, I think etnaviv at least could use this too.
>> 
>> I'm starting to look at panfrost and lima drivers and was trying to
>> figure out where to start with the GEM code. So I've been comparing
>> etnaviv, freedreno, and vgem implementations. They are all pretty
>> similar from what I see. The per driver GEM obj structs certainly are.
>> I can't bring myself to just copy etnaviv code over and do a
>> s/etnaviv/panfrost/. So searching around a bit, I ended up on this
>> thread. This seems to be what I need for panfrost (based on my brief
>> study).
>> 
>
> I gave up on this due to problems with SPI DMA.
> Eric tried to use it with vkms, but it failed. On his blog he speculates
> that it might be due to cached CPU mappings:
> https://anholt.github.io/twivc4/2018/12/03/twiv/
>
> For tinydrm I wanted cached mappings, but it might not work that well
> with shmem. Maybe that's why I had problems with SPI DMA.

Actually, for tinydrm buffers that are dma-buf exported through prime, I
really want tinydrm using WC mappings so that vc4 or v3d rendering (now
supported on Mesa master with kmsro) works.
Noralf Trønnes Jan. 29, 2019, 8:44 a.m. UTC | #16
Den 29.01.2019 01.19, skrev Eric Anholt:
> Noralf Trønnes <noralf@tronnes.org> writes:
> 
>> Den 28.01.2019 21.57, skrev Rob Herring:
>>> On Sun, Dec 2, 2018 at 9:59 AM Noralf Trønnes <noralf@tronnes.org> wrote:
>>>>
>>>>
>>>> Den 30.11.2018 00.58, skrev Eric Anholt:
>>>>> Daniel Vetter <daniel@ffwll.ch> writes:
>>>>>
>>>>>> On Wed, Nov 28, 2018 at 01:52:56PM -0800, Eric Anholt wrote:
>>>>>>> Daniel Vetter <daniel@ffwll.ch> writes:
>>>>>>>
>>>>>>>> On Tue, Nov 27, 2018 at 12:38:44PM -0800, Eric Anholt wrote:
>>>>>>>>> Daniel Vetter <daniel@ffwll.ch> writes:
>>>>>>>>>
>>>>>>>>>> On Mon, Nov 26, 2018 at 04:36:21PM -0800, Eric Anholt wrote:
>>>>>>>>>>> Noralf Trønnes <noralf@tronnes.org> writes:
>>>>>>>>>>>> +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
>>>>>>>>>>>> +{
>>>>>>>>>>>> +      struct drm_gem_object *obj = vma->vm_private_data;
>>>>>>>>>>>> +      struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
>>>>>>>>>>>> +
>>>>>>>>>>>> +      drm_gem_shmem_put_pages(shmem);
>>>>>>>>>>>> +      drm_gem_vm_close(vma);
>>>>>>>>>>>> +}
>>>>>>>>>>>> +
>>>>>>>>>>>> +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
>>>>>>>>>>>> +      .fault = drm_gem_shmem_fault,
>>>>>>>>>>>> +      .open = drm_gem_vm_open,
>>>>>>>>>>>> +      .close = drm_gem_shmem_vm_close,
>>>>>>>>>>>> +};
>>>>>>>>>>>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
>>>>>>>>>>> I just saw a warning from drm_gem_shmem_put_pages() for
>>>>>>>>>>> !shmem->pages_use_count -- I think drm_gem_vm_open() needs to
>>>>>>>>>>> drm_gem_shmem_get_pages().
>>>>>>>>>> Yeah we need a drm_gem_shmem_vm_open here.
>>>>>>>>> Adding one of those fixed my refcounting issues, so I've sent out a v6
>>>>>>>>> with it.
>>>>>>>> Just realized that I've reviewed this patch already, spotted that vma
>>>>>>>> management issue there too. Plus a pile of other things. From reading that
>>>>>>>> other thread discussion with Noralf concluded with "not yet ready for
>>>>>>>> prime time" unfortunately :-/
>>>>>>> I saw stuff about how it wasn't usable for SPI because SPI does weird
>>>>>>> things with DMA mapping.  Was there something else?
>>>>>> Looking through that mail it was a bunch of comments to improve the
>>>>>> kerneldoc. Plus a note that buffer sharing/mmap is going to be all
>>>>>> incoherent and horrible (but I guess for vkms we don't care that much).
>>>>>> I'm just kinda vary of generic buffer handling that turns out to not be
>>>>>> actually all that useful. We have lots of deadends and kinda-midlayers in
>>>>>> this area already (but this one here definitely smells plenty better than
>>>>>> lots of older ones).
>>>>> FWIW, I really want shmem helpers for v3d.  The fault handling in
>>>>> particular has magic I don't understand, and this is not my first fault
>>>>> handler. :/
>>>>
>>>>
>>>> If you can use it for a "real" hw driver like v3d, I think it makes a lot
>>>> sense to have it as a helper. I believe udl and a future simpledrm can
>>>> also make use of it.
>>>
>>> FWIW, I think etnaviv at least could use this too.
>>>
>>> I'm starting to look at panfrost and lima drivers and was trying to
>>> figure out where to start with the GEM code. So I've been comparing
>>> etnaviv, freedreno, and vgem implementations. They are all pretty
>>> similar from what I see. The per driver GEM obj structs certainly are.
>>> I can't bring myself to just copy etnaviv code over and do a
>>> s/etnaviv/panfrost/. So searching around a bit, I ended up on this
>>> thread. This seems to be what I need for panfrost (based on my brief
>>> study).
>>>
>>
>> I gave up on this due to problems with SPI DMA.
>> Eric tried to use it with vkms, but it failed. On his blog he speculates
>> that it might be due to cached CPU mappings:
>> https://anholt.github.io/twivc4/2018/12/03/twiv/
>>
>> For tinydrm I wanted cached mappings, but it might not work that well
>> with shmem. Maybe that's why I had problems with SPI DMA.
> 
> Actually, for tinydrm buffers that are dma-buf exported through prime, I
> really want tinydrm using WC mappings so that vc4 or v3d rendering (now
> supported on Mesa master with kmsro) works.
> 

I thought that the buffer is created by the GPU driver when using PRIME?
And then imported into the tinydrm driver. In which case it doesn't
matter how tinydrm creates its dumb buffers.

That said, I will stay with CMA buffers for the SPI drivers. It just
works. The reason I looked into shmem is that tinydrm would then be
able to import buffers that isn't contiguous in memory, thus making it
work with more GPU's. And the reason for cached buffers is that with
partial flushing the rectangle is copied to a separate buffer for
transfer and CPU access to WC memory is quite slow. But ofc for these
small buffers it's not that big a deal.

Noralf.
diff mbox series

Patch

diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 4b4dc236ef6f..8305d3566928 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -335,3 +335,15 @@  Legacy CRTC/Modeset Helper Functions Reference
 
 .. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
    :export:
+
+SHMEM GEM Helper Reference
+==========================
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_shmem_helper.c
+   :doc: overview
+
+.. kernel-doc:: include/drm/drm_gem_shmem_helper.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_shmem_helper.c
+   :export:
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 736b7e67e4ec..46090a36f52e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -157,6 +157,12 @@  config DRM_KMS_CMA_HELPER
 	help
 	  Choose this if you need the KMS CMA helper functions
 
+config DRM_GEM_SHMEM_HELPER
+	bool
+	depends on DRM
+	help
+	  Choose this if you need the GEM shmem helper functions
+
 config DRM_VM
 	bool
 	depends on DRM && MMU
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 576ba985e138..8733ceb41292 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -25,6 +25,7 @@  drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_DRM_VM) += drm_vm.o
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
 drm-$(CONFIG_PCI) += ati_pcigart.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
new file mode 100644
index 000000000000..c4eec51d3282
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -0,0 +1,551 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Noralf Trønnes
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for GEM objects backed by shmem buffers
+ * allocated using anonymous pageable memory.
+ */
+
+static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
+	.free = drm_gem_shmem_free_object,
+	.print_info = drm_gem_shmem_print_info,
+	.pin = drm_gem_shmem_pin,
+	.unpin = drm_gem_shmem_unpin,
+	.get_sg_table = drm_gem_shmem_get_sg_table,
+	.vmap = drm_gem_shmem_vmap,
+	.vunmap = drm_gem_shmem_vunmap,
+	.vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/**
+ * drm_gem_shmem_create - Allocate an object with the given size
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This function creates a shmem GEM object.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
+{
+	struct drm_gem_shmem_object *shmem;
+	struct drm_gem_object *obj;
+	int ret;
+
+	size = PAGE_ALIGN(size);
+
+	if (dev->driver->gem_create_object)
+		obj = dev->driver->gem_create_object(dev, size);
+	else
+		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	if (!obj->funcs)
+		obj->funcs = &drm_gem_shmem_funcs;
+
+	ret = drm_gem_object_init(dev, obj, size);
+	if (ret)
+		goto err_free;
+
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret)
+		goto err_release;
+
+	shmem = to_drm_gem_shmem_obj(obj);
+	mutex_init(&shmem->pages_lock);
+	mutex_init(&shmem->vmap_lock);
+
+	return shmem;
+
+err_release:
+	drm_gem_object_release(obj);
+err_free:
+	kfree(shmem);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
+
+/**
+ * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
+ * @obj: GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself.
+ */
+void drm_gem_shmem_free_object(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	WARN_ON(shmem->vmap_use_count);
+
+	if (obj->import_attach) {
+		shmem->pages_use_count--;
+		drm_prime_gem_destroy(obj, shmem->sgt);
+		kvfree(shmem->pages);
+	}
+
+	WARN_ON(shmem->pages_use_count);
+
+	drm_gem_object_release(obj);
+	mutex_destroy(&shmem->pages_lock);
+	mutex_destroy(&shmem->vmap_lock);
+	kfree(shmem);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);
+
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
+	struct page **pages;
+
+	if (shmem->pages_use_count++ > 0)
+		return 0;
+
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages)) {
+		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+		shmem->pages_use_count = 0;
+		return PTR_ERR(pages);
+	}
+
+	shmem->pages = pages;
+
+	return 0;
+}
+
+/*
+ * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function makes sure that backing pages exist for the shmem GEM object
+ * and increases the use count.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(&shmem->pages_lock);
+	if (ret)
+		return ret;
+	ret = drm_gem_shmem_get_pages_locked(shmem);
+	mutex_unlock(&shmem->pages_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_shmem_get_pages);
+
+static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
+
+	if (WARN_ON_ONCE(!shmem->pages_use_count))
+		return;
+
+	if (--shmem->pages_use_count > 0)
+		return;
+
+	drm_gem_put_pages(obj, shmem->pages,
+			  shmem->pages_mark_dirty_on_put,
+			  shmem->pages_mark_accessed_on_put);
+	shmem->pages = NULL;
+}
+
+/*
+ * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function decreases the use count and puts the backing pages when use drops to zero.
+ */
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+{
+	mutex_lock(&shmem->pages_lock);
+	drm_gem_shmem_put_pages_locked(shmem);
+	mutex_unlock(&shmem->pages_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+
+/**
+ * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function makes sure the backing pages are pinned in memory while the
+ * buffer is exported.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_pin(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	return drm_gem_shmem_get_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_pin);
+
+/**
+ * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function removes the requirement that the backing pages are pinned in
+ * memory.
+ */
+void drm_gem_shmem_unpin(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	drm_gem_shmem_put_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_unpin);
+
+static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
+	int ret;
+
+	if (shmem->vmap_use_count++ > 0)
+		return shmem->vaddr;
+
+	ret = drm_gem_shmem_get_pages(shmem);
+	if (ret)
+		goto err_zero_use;
+
+	if (obj->import_attach)
+		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+	else
+		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
+
+	if (!shmem->vaddr) {
+		DRM_DEBUG_KMS("Failed to vmap pages\n");
+		ret = -ENOMEM;
+		goto err_put_pages;
+	}
+
+	return shmem->vaddr;
+
+err_put_pages:
+	drm_gem_shmem_put_pages(shmem);
+err_zero_use:
+	shmem->vmap_use_count = 0;
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function makes sure that a virtual address exists for the buffer backing
+ * the shmem GEM object.
+ *
+ * Returns:
+ * The kernel virtual address on success or an ERR_PTR()-encoded error pointer on failure.
+ */
+void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	void *vaddr;
+	int ret;
+
+	ret = mutex_lock_interruptible(&shmem->vmap_lock);
+	if (ret)
+		return ERR_PTR(ret);
+	vaddr = drm_gem_shmem_vmap_locked(shmem);
+	mutex_unlock(&shmem->vmap_lock);
+
+	return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_shmem_vmap);
+
+static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
+
+	if (WARN_ON_ONCE(!shmem->vmap_use_count))
+		return;
+
+	if (--shmem->vmap_use_count > 0)
+		return;
+
+	if (obj->import_attach)
+		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
+	else
+		vunmap(shmem->vaddr);
+
+	shmem->vaddr = NULL;
+	drm_gem_shmem_put_pages(shmem);
+}
+
+/*
+ * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function removes the virtual address when use count drops to zero.
+ */
+void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	mutex_lock(&shmem->vmap_lock);
+	drm_gem_shmem_vunmap_locked(shmem);
+	mutex_unlock(&shmem->vmap_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+
+static struct drm_gem_shmem_object *
+drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
+				 struct drm_device *dev, size_t size,
+				 uint32_t *handle)
+{
+	struct drm_gem_shmem_object *shmem;
+	int ret;
+
+	shmem = drm_gem_shmem_create(dev, size);
+	if (IS_ERR(shmem))
+		return shmem;
+
+	/*
+	 * Allocate an id of idr table where the obj is registered
+	 * and handle has the id what user can see.
+	 */
+	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_put_unlocked(&shmem->base);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return shmem;
+}
+
+/**
+ * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
+ * @file: DRM file structure to create the dumb buffer for
+ * @dev: DRM device
+ * @args: IOCTL data
+ *
+ * This function computes the pitch of the dumb buffer and rounds it up to an
+ * integer number of bytes per pixel. Drivers for hardware that doesn't have
+ * any additional restrictions on the pitch can directly use this function as
+ * their &drm_driver.dumb_create callback.
+ *
+ * For hardware with additional restrictions, drivers can adjust the fields
+ * set up by userspace before calling into this function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
+			      struct drm_mode_create_dumb *args)
+{
+	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+	struct drm_gem_shmem_object *shmem;
+
+	if (!args->pitch || !args->size) {
+		args->pitch = min_pitch;
+		args->size = args->pitch * args->height;
+	} else {
+		/* ensure sane minimum values */
+		if (args->pitch < min_pitch)
+			args->pitch = min_pitch;
+		if (args->size < args->pitch * args->height)
+			args->size = args->pitch * args->height;
+	}
+
+	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
+
+	return PTR_ERR_OR_ZERO(shmem);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
+
+static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	loff_t num_pages = obj->size >> PAGE_SHIFT;
+	struct page *page;
+
+	if (vmf->pgoff > num_pages || WARN_ON_ONCE(!shmem->pages))
+		return VM_FAULT_SIGBUS;
+
+	page = shmem->pages[vmf->pgoff];
+
+	return vmf_insert_page(vma, vmf->address, page);
+}
+
+static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	drm_gem_shmem_put_pages(shmem);
+	drm_gem_vm_close(vma);
+}
+
+const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+	.fault = drm_gem_shmem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_shmem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
+
+/**
+ * drm_gem_shmem_mmap - Memory-map a shmem GEM object
+ * @filp: File object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function implements an augmented version of the GEM DRM file mmap
+ * operation for shmem objects. Drivers which employ the shmem helpers should
+ * use this function as their &file_operations.mmap handler in the DRM device file's
+ * file_operations structure.
+ *
+ * Instead of directly referencing this function, drivers should use the
+ * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_gem_shmem_object *shmem;
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
+
+	ret = drm_gem_shmem_get_pages(shmem);
+	if (ret) {
+		drm_gem_vm_close(vma);
+		return ret;
+	}
+
+	/* VM_PFNMAP was set by drm_gem_mmap() */
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+	fput(vma->vm_file);
+	vma->vm_file = get_file(shmem->base.filp);
+	/* Remove the fake offset */
+	vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
+
+/**
+ * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ */
+void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
+			      const struct drm_gem_object *obj)
+{
+	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
+	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
+}
+EXPORT_SYMBOL(drm_gem_shmem_print_info);
+
+/**
+ * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
+ *                              pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function exports a scatter/gather table suitable for PRIME usage by
+ * calling the standard DMA mapping API.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
+struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
+
+/**
+ * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
+ *                 another driver's scatter/gather table of pinned pages
+ * @dev: Device to import into
+ * @attach: DMA-BUF attachment
+ * @sgt: Scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table exported via DMA-BUF by
+ * another driver. Drivers that use the shmem helpers should set this as their
+ * &drm_driver.gem_prime_import_sg_table callback.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
+				    struct dma_buf_attachment *attach,
+				    struct sg_table *sgt)
+{
+	size_t size = PAGE_ALIGN(attach->dmabuf->size);
+	size_t npages = size >> PAGE_SHIFT;
+	struct drm_gem_shmem_object *shmem;
+	int ret;
+
+	shmem = drm_gem_shmem_create(dev, size);
+	if (IS_ERR(shmem))
+		return ERR_CAST(shmem);
+
+	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+	if (!shmem->pages) {
+		ret = -ENOMEM;
+		goto err_free_gem;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
+	if (ret < 0)
+		goto err_free_array;
+
+	shmem->sgt = sgt;
+	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
+
+	DRM_DEBUG_PRIME("size = %zu\n", size);
+
+	return &shmem->base;
+
+err_free_array:
+	kvfree(shmem->pages);
+err_free_gem:
+	drm_gem_object_put_unlocked(&shmem->base);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
new file mode 100644
index 000000000000..26b05e06407d
--- /dev/null
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -0,0 +1,153 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DRM_GEM_SHMEM_HELPER_H__
+#define __DRM_GEM_SHMEM_HELPER_H__
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+
+struct dma_buf_attachment;
+struct drm_mode_create_dumb;
+struct drm_printer;
+struct sg_table;
+
+/**
+ * struct drm_gem_shmem_object - GEM object backed by shmem
+ */
+struct drm_gem_shmem_object {
+	/**
+	 * @base: Base GEM object
+	 */
+	struct drm_gem_object base;
+
+	/**
+	 * @pages_lock: Protects the page table and use count
+	 */
+	struct mutex pages_lock;
+
+	/**
+	 * @pages: Page table
+	 */
+	struct page **pages;
+
+	/**
+	 * @pages_use_count:
+	 *
+	 * Reference count on the pages table.
+	 * The pages are put when the count reaches zero.
+	 */
+	unsigned int pages_use_count;
+
+	/**
+	 * @pages_mark_dirty_on_put:
+	 *
+	 * Mark pages as dirty when they are put.
+	 */
+	unsigned int pages_mark_dirty_on_put    : 1;
+
+	/**
+	 * @pages_mark_accessed_on_put:
+	 *
+	 * Mark pages as accessed when they are put.
+	 */
+	unsigned int pages_mark_accessed_on_put : 1;
+
+	/**
+	 * @sgt: Scatter/gather table for imported PRIME buffers
+	 */
+	struct sg_table *sgt;
+
+	/**
+	 * @vmap_lock: Protects the vmap address and use count
+	 */
+	struct mutex vmap_lock;
+
+	/**
+	 * @vaddr: Kernel virtual address of the backing memory
+	 */
+	void *vaddr;
+
+	/**
+	 * @vmap_use_count:
+	 *
+	 * Reference count on the virtual address.
+	 * The address is unmapped when the count reaches zero.
+	 */
+	unsigned int vmap_use_count;
+};
+
+#define to_drm_gem_shmem_obj(obj) \
+	container_of(obj, struct drm_gem_shmem_object, base)
+
+/**
+ * DEFINE_DRM_GEM_SHMEM_FOPS() - Macro to generate file operations for shmem drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for shmem based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_SHMEM_FOPS(name) \
+	static const struct file_operations name = {\
+		.owner		= THIS_MODULE,\
+		.open		= drm_open,\
+		.release	= drm_release,\
+		.unlocked_ioctl	= drm_ioctl,\
+		.compat_ioctl	= drm_compat_ioctl,\
+		.poll		= drm_poll,\
+		.read		= drm_read,\
+		.llseek		= noop_llseek,\
+		.mmap		= drm_gem_shmem_mmap, \
+	}
+
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
+void drm_gem_shmem_free_object(struct drm_gem_object *obj);
+
+int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
+int drm_gem_shmem_pin(struct drm_gem_object *obj);
+void drm_gem_shmem_unpin(struct drm_gem_object *obj);
+void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
+void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
+			      struct drm_mode_create_dumb *args);
+
+int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
+
+void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
+			      const struct drm_gem_object *obj);
+
+struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
+				    struct dma_buf_attachment *attach,
+				    struct sg_table *sgt);
+
+/**
+ * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
+ *
+ * This macro provides a shortcut for setting the shmem GEM operations in
+ * the &drm_driver structure.
+ */
+#define DRM_GEM_SHMEM_DRIVER_OPS \
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
+	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
+	.gem_prime_mmap		= drm_gem_prime_mmap, \
+	.dumb_create		= drm_gem_shmem_dumb_create
+
+#endif /* __DRM_GEM_SHMEM_HELPER_H__ */