diff mbox series

[v2,1/2] drm: Add library for shmem backed GEM objects

Message ID 20180902201712.45765-2-noralf@tronnes.org (mailing list archive)
State New, archived
Headers show
Series drm: Add shmem GEM library | expand

Commit Message

Noralf Trønnes Sept. 2, 2018, 8:17 p.m. UTC
This adds a library for shmem backed GEM objects with the necessary
drm_driver callbacks.

Signed-off-by: Noralf Trønnes <noralf@tronnes.org>
---

Changes since version 1:
- Fix missing argument in docs (kbuild test robot)
- Fix: sparse: expression using sizeof(void) (kbuild test robot)
- Rebasing gave a new checkpatch warning, so I changed to bitfields:
  CHECK: Avoid using bool structure members because of possible alignment issues - see: https://lkml.org/lkml/2017/11/21/384
  #834: FILE: include/drm/drm_gem_shmem_helper.h:84:
  +       bool pages_mark_dirty_on_put;
  #841: FILE: include/drm/drm_gem_shmem_helper.h:91:
  +       bool pages_mark_accessed_on_put;

 Documentation/gpu/drm-kms-helpers.rst  |  12 +
 drivers/gpu/drm/Kconfig                |   6 +
 drivers/gpu/drm/Makefile               |   1 +
 drivers/gpu/drm/drm_gem_shmem_helper.c | 676 +++++++++++++++++++++++++++++++++
 include/drm/drm_gem_shmem_helper.h     | 198 ++++++++++
 5 files changed, 893 insertions(+)
 create mode 100644 drivers/gpu/drm/drm_gem_shmem_helper.c
 create mode 100644 include/drm/drm_gem_shmem_helper.h
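
For reference, a driver picks all of this up through the two macros added here,
roughly like this (the mydrv names are made up for illustration):

#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>

DEFINE_DRM_GEM_SHMEM_FOPS(mydrv_fops);

static struct drm_driver mydrv_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	.fops		 = &mydrv_fops,
	.name		 = "mydrv",
	DRM_GEM_SHMEM_DRIVER_OPS,
};

plus a 'select DRM_GEM_SHMEM_HELPER' in the driver's Kconfig entry.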

Comments

Sam Ravnborg Sept. 2, 2018, 8:56 p.m. UTC | #1
Hi Noralf.

Only nitpicks, I don't have the background
to review the actual implementation.
So no tags from me to put on the commit.

	Sam

> +/**
> + * drm_gem_shmem_create - Allocate an object with the given size
> + * @dev: DRM device
> + * @size: Size of the object to allocate
> + *
> + * This function creates a shmem GEM object. The default cache mode is
> + * DRM_GEM_SHMEM_BO_CACHED. The &drm_driver->gem_create_object callback can be
> + * used override this.
used to override this.
     ^^

> + *
> + * Returns:
> + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
> + * error code on failure.
> + */
> +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
> +{
> +	struct drm_gem_shmem_object *shmem;
> +	struct drm_gem_object *obj;
> +	int ret;
> +
> +	size = PAGE_ALIGN(size);
> +
> +	if (dev->driver->gem_create_object)
> +		obj = dev->driver->gem_create_object(dev, size);
> +	else
> +		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
> +	if (!obj)
> +		return ERR_PTR(-ENOMEM);
> +
> +	shmem = to_drm_gem_shmem_obj(obj);
> +
> +	if (!dev->driver->gem_create_object)
> +		shmem->cache_mode = DRM_GEM_SHMEM_BO_CACHED;
> +
> +	ret = drm_gem_object_init(dev, obj, size);
> +	if (ret)
> +		goto err_free;
Some users of drm_gem_object_init() call drm_gem_object_put_unlocked(obj)
when there is an error. Others call kfree() like in this case.

> +
> +	ret = drm_gem_create_mmap_offset(obj);
> +	if (ret)
> +		goto err_release;
> +
> +	mutex_init(&shmem->pages_lock);
> +	mutex_init(&shmem->vmap_lock);
> +
> +	return shmem;
> +
> +err_release:
> +	drm_gem_object_release(obj);
> +err_free:
> +	kfree(shmem);
> +
> +	return ERR_PTR(ret);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
> +
> +
> +static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
> +{
> +	struct drm_gem_object *obj = &shmem->base;
> +	struct page **pages;
> +
> +	if (shmem->pages_use_count++ > 0)
> +		return 0;
> +
> +	pages = drm_gem_get_pages(obj);
> +	if (IS_ERR(pages)) {
> +		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
> +		shmem->pages_use_count = 0;
> +		return PTR_ERR(pages);
> +	}
> +
> +	shmem->pages = pages;
> +
> +	return 0;
> +}
> +
> +/**
> + * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
> + * @shmem: shmem GEM object
> + *
> + * This function makes sure that backing pages exist for the shmem GEM object
> + * and increases the use count.
> + *
> + * Returns:
> + * 0 on success or a negative error code on failure.
> + */
> +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
> +{
> +	int ret;
> +
> +	ret = mutex_lock_interruptible(&shmem->pages_lock);
> +	if (ret)
> +		return ret;
> +	ret = drm_gem_shmem_get_pages_locked(shmem);
> +	mutex_unlock(&shmem->pages_lock);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_get_pages);
> +

The function is named *_unlocked, but called with a lock held.
Inconsistent?
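
Maybe name the variant that expects pages_lock to be held *_locked
instead, matching the get side, e.g.:

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem);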

> +static void drm_gem_shmem_put_pages_unlocked(struct drm_gem_shmem_object *shmem)
> +{
> +	struct drm_gem_object *obj = &shmem->base;
> +
> +	if (WARN_ON_ONCE(!shmem->pages_use_count))
> +		return;
> +
> +	if (--shmem->pages_use_count > 0)
> +		return;
> +
> +	drm_gem_put_pages(obj, shmem->pages,
> +			  shmem->pages_mark_dirty_on_put,
> +			  shmem->pages_mark_accessed_on_put);
> +	shmem->pages = NULL;
> +}
> +
> +/**
> + * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
> + * @shmem: shmem GEM object
> + *
> + * This function decreases the use count and puts the backing pages when use drops to zero.
> + */
> +void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
> +{
> +	mutex_lock(&shmem->pages_lock);
> +	drm_gem_shmem_put_pages_unlocked(shmem);
> +	mutex_unlock(&shmem->pages_lock);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_put_pages);
> +
> +static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
> +{
> +	struct drm_gem_object *obj = &shmem->base;
> +	int ret;
> +
> +	if (shmem->vmap_use_count++ > 0)
> +		return 0;
> +
> +	ret = drm_gem_shmem_get_pages(shmem);
> +	if (ret)
> +		goto err_zero_use;
> +
> +	if (obj->import_attach) {
> +		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
> +	} else {
> +		pgprot_t prot;
> +
> +		switch (shmem->cache_mode) {
> +		case DRM_GEM_SHMEM_BO_UNKNOWN:
No printout to help the coder that did not set this?
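Maybe something like this, reusing the DRM_DEBUG_KMS style the rest of
the file already uses:

		case DRM_GEM_SHMEM_BO_UNKNOWN:
			DRM_DEBUG_KMS("Cache mode not set, can't vmap\n");
			ret = -EINVAL;
			goto err_put_pages;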

> +			ret = -EINVAL;
> +			goto err_put_pages;
> +
> +		case DRM_GEM_SHMEM_BO_WRITECOMBINED:
> +			prot = pgprot_writecombine(PAGE_KERNEL);
> +			break;
> +
> +		case DRM_GEM_SHMEM_BO_UNCACHED:
> +			prot = pgprot_noncached(PAGE_KERNEL);
> +			break;
> +
> +		case DRM_GEM_SHMEM_BO_CACHED:
> +			prot = PAGE_KERNEL;
> +			break;
> +		}
> +
> +		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, prot);
> +	}
> +
> +	if (!shmem->vaddr) {
> +		DRM_DEBUG_KMS("Failed to vmap pages\n");
> +		ret = -ENOMEM;
> +		goto err_put_pages;
> +	}
> +
> +	return 0;
> +
> +err_put_pages:
> +	drm_gem_shmem_put_pages(shmem);
> +err_zero_use:
> +	shmem->vmap_use_count = 0;
> +
> +	return ret;
> +}
> +
> +/**
> + * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
> + * @shmem: shmem GEM object
> + *
> + * This function makes sure that a virtual address exists for the buffer backing
> + * the shmem GEM object.
> + *
> + * Returns:
> + * 0 on success or a negative error code on failure.
> + */
> +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem)
> +{
> +	int ret;
> +
> +	ret = mutex_lock_interruptible(&shmem->vmap_lock);
> +	if (ret)
> +		return ret;
> +	ret = drm_gem_shmem_vmap_locked(shmem);
> +	mutex_unlock(&shmem->vmap_lock);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_vmap);
> +
> +static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
> +{
> +	struct drm_gem_object *obj = &shmem->base;
> +
> +	if (WARN_ON_ONCE(!shmem->vmap_use_count))
> +		return;
> +
> +	if (--shmem->vmap_use_count > 0)
> +		return;
> +
> +	if (obj->import_attach)
> +		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
> +	else
> +		vunmap(shmem->vaddr);
> +
> +	shmem->vaddr = NULL;
> +	drm_gem_shmem_put_pages(shmem);
> +}
> +
> +/**
> + * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
> + * @shmem: shmem GEM object
> + *
> + * This function removes the virtual address when use count drops to zero.
> + */
> +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem)
> +{
> +	mutex_lock(&shmem->vmap_lock);
> +	drm_gem_shmem_vunmap_locked(shmem);
> +	mutex_unlock(&shmem->vmap_lock);
> +}
> +EXPORT_SYMBOL(drm_gem_shmem_vunmap);
> +
> +static struct drm_gem_shmem_object *
> +drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
> +				 struct drm_device *dev, size_t size,
> +				 uint32_t *handle)
> +{
> +	struct drm_gem_shmem_object *shmem;
> +	int ret;
> +
> +	shmem = drm_gem_shmem_create(dev, size);
> +	if (IS_ERR(shmem))
> +		return shmem;
> +
> +	/*
> +	 * Allocate an id in the idr table where the obj is registered,
> +	 * and the handle holds the id that userspace can see.
> +	 */
> +	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
> +	/* drop reference from allocate - handle holds it now. */
> +	drm_gem_object_put_unlocked(&shmem->base);
> +	if (ret)
> +		return ERR_PTR(ret);
> +
> +	return shmem;
> +}
> +
> +/**
> + * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
> + * @file: DRM file structure to create the dumb buffer for
> + * @dev: DRM device
> + * @args: IOCTL data
> + *
> + * This function computes the pitch of the dumb buffer and rounds it up to an
> + * integer number of bytes per pixel. Drivers for hardware that doesn't have
> + * any additional restrictions on the pitch can directly use this function as
> + * their &drm_driver.dumb_create callback.
> + *
> + * For hardware with additional restrictions, drivers can adjust the fields
> + * set up by userspace before calling into this function.
> + *
> + * Returns:
> + * 0 on success or a negative error code on failure.
> + */
> +int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
> +			      struct drm_mode_create_dumb *args)
> +{
> +	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
> +	struct drm_gem_shmem_object *shmem;
> +
> +	if (!args->pitch || !args->size) {
> +		args->pitch = min_pitch;
> +		args->size = args->pitch * args->height;
> +	} else {
> +		/* ensure sane minimum values */
> +		if (args->pitch < min_pitch)
> +			args->pitch = min_pitch;
> +		if (args->size < args->pitch * args->height)
> +			args->size = args->pitch * args->height;
> +	}
> +
> +	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
> +
> +	return PTR_ERR_OR_ZERO(shmem);
> +}
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
> +
> +static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
> +{
> +	struct vm_area_struct *vma = vmf->vma;
> +	struct drm_gem_object *obj = vma->vm_private_data;
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +	/* We don't use vmf->pgoff since that has the fake offset: */
> +	pgoff_t pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
> +	loff_t num_pages = obj->size >> PAGE_SHIFT;
> +	struct page *page;
> +
> +	if (pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
> +		return VM_FAULT_SIGBUS;
> +
> +	page = shmem->pages[pgoff];
> +
> +	return vmf_insert_page(vma, vmf->address, page);
> +}
> +
> +static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
> +{
> +	struct drm_gem_object *obj = vma->vm_private_data;
> +	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
> +
> +	drm_gem_shmem_put_pages(shmem);
> +	drm_gem_vm_close(vma);
> +}
> +
> +const struct vm_operations_struct drm_gem_shmem_vm_ops = {
> +	.fault = drm_gem_shmem_fault,
> +	.open = drm_gem_vm_open,
> +	.close = drm_gem_shmem_vm_close,
> +};
> +EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
> +
> +static int drm_gem_shmem_mmap_obj(struct drm_gem_shmem_object *shmem,
> +				  struct vm_area_struct *vma)
> +{
> +	int ret;
> +
> +	ret = drm_gem_shmem_get_pages(shmem);
> +	if (ret)
> +		goto err_close;
> +
> +	/* VM_PFNMAP was set by drm_gem_mmap() */
> +	vma->vm_flags &= ~VM_PFNMAP;
> +	vma->vm_flags |= VM_MIXEDMAP;
> +
> +	switch (shmem->cache_mode) {
> +	case DRM_GEM_SHMEM_BO_UNKNOWN:
> +		ret = -EINVAL;
Print to help the programmer?

> +		goto err_put_pages;
> +
> +	case DRM_GEM_SHMEM_BO_WRITECOMBINED:
> +		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
> +		break;
> +
> +	case DRM_GEM_SHMEM_BO_UNCACHED:
> +		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
> +		break;
> +
> +	case DRM_GEM_SHMEM_BO_CACHED:
> +		/*
> +		 * Shunt off cached objs to shmem file so they have their own
> +		 * address_space (so unmap_mapping_range does what we want,
> +		 * in particular in the case of mmap'd dmabufs)
> +		 */
> +		fput(vma->vm_file);
> +		get_file(shmem->base.filp);
> +		vma->vm_pgoff = 0;
> +		vma->vm_file  = shmem->base.filp;
> +		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
> +		break;
> +	}
> +
> +	return 0;
> +
> +err_put_pages:
> +	drm_gem_shmem_put_pages(shmem);
> +err_close:
> +	drm_gem_vm_close(vma);
> +
> +	return ret;
> +}
> +
Noralf Trønnes Sept. 3, 2018, 2:22 p.m. UTC | #2
On 02.09.2018 22.56, Sam Ravnborg wrote:
> Hi Noralf.
>
> Only nitpicks, I don't have the background
> to review the actual implementation.
> So no tags from me to put on the commit.
>
> 	Sam
>
>> +/**
>> + * drm_gem_shmem_create - Allocate an object with the given size
>> + * @dev: DRM device
>> + * @size: Size of the object to allocate
>> + *
>> + * This function creates a shmem GEM object. The default cache mode is
>> + * DRM_GEM_SHMEM_BO_CACHED. The &drm_driver->gem_create_object callback can be
>> + * used override this.
> used to override this.
>       ^^
>
>> + *
>> + * Returns:
>> + * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
>> + * error code on failure.
>> + */
>> +struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
>> +{
>> +	struct drm_gem_shmem_object *shmem;
>> +	struct drm_gem_object *obj;
>> +	int ret;
>> +
>> +	size = PAGE_ALIGN(size);
>> +
>> +	if (dev->driver->gem_create_object)
>> +		obj = dev->driver->gem_create_object(dev, size);
>> +	else
>> +		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
>> +	if (!obj)
>> +		return ERR_PTR(-ENOMEM);
>> +
>> +	shmem = to_drm_gem_shmem_obj(obj);
>> +
>> +	if (!dev->driver->gem_create_object)
>> +		shmem->cache_mode = DRM_GEM_SHMEM_BO_CACHED;
>> +
>> +	ret = drm_gem_object_init(dev, obj, size);
>> +	if (ret)
>> +		goto err_free;
> Some users of drm_gem_object_init() call drm_gem_object_put_unlocked(obj)
> when there is an error. Others call kfree() like in this case.

I could also have done that if I initialized the mutexes earlier to match
up with the destroy in *_free_object. But it doesn't buy me anything,
so I prefer to stay with how the majority does it.

I really like to use Elixir when tracking down how drivers use the
various DRM functions:
https://elixir.bootlin.com/linux/latest/ident/drm_gem_object_init
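
If I were to do it that way, it would look something like this (untested),
with the mutexes initialized before drm_gem_object_init():

	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto err_put;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_put;

	return shmem;

err_put:
	drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);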

>> +
>> +	ret = drm_gem_create_mmap_offset(obj);
>> +	if (ret)
>> +		goto err_release;
>> +
>> +	mutex_init(&shmem->pages_lock);
>> +	mutex_init(&shmem->vmap_lock);
>> +
>> +	return shmem;
>> +
>> +err_release:
>> +	drm_gem_object_release(obj);
>> +err_free:
>> +	kfree(shmem);
>> +
>> +	return ERR_PTR(ret);
>> +}
>> +EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
>> +
>> +
>> +static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
>> +{
>> +	struct drm_gem_object *obj = &shmem->base;
>> +	struct page **pages;
>> +
>> +	if (shmem->pages_use_count++ > 0)
>> +		return 0;
>> +
>> +	pages = drm_gem_get_pages(obj);
>> +	if (IS_ERR(pages)) {
>> +		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
>> +		shmem->pages_use_count = 0;
>> +		return PTR_ERR(pages);
>> +	}
>> +
>> +	shmem->pages = pages;
>> +
>> +	return 0;
>> +}
>> +
>> +/**
>> + * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
>> + * @shmem: shmem GEM object
>> + *
>> + * This function makes sure that backing pages exist for the shmem GEM object
>> + * and increases the use count.
>> + *
>> + * Returns:
>> + * 0 on success or a negative error code on failure.
>> + */
>> +int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
>> +{
>> +	int ret;
>> +
>> +	ret = mutex_lock_interruptible(&shmem->pages_lock);
>> +	if (ret)
>> +		return ret;
>> +	ret = drm_gem_shmem_get_pages_locked(shmem);
>> +	mutex_unlock(&shmem->pages_lock);
>> +
>> +	return ret;
>> +}
>> +EXPORT_SYMBOL(drm_gem_shmem_get_pages);
>> +
> The function is named *_unlocked, but called with a lock held.
> Inconsistent?

Indeed it is.

Thanks,
Noralf

diff mbox series

Patch

diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index f9cfcdcdf024..bc24b1b5216a 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -326,3 +326,15 @@  Legacy CRTC/Modeset Helper Functions Reference
 
 .. kernel-doc:: drivers/gpu/drm/drm_crtc_helper.c
    :export:
+
+SHMEM GEM Helper Reference
+==========================
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_shmem_helper.c
+   :doc: overview
+
+.. kernel-doc:: include/drm/drm_gem_shmem_helper.h
+   :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_shmem_helper.c
+   :export:
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index cb88528e7b10..db588ae44bcc 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -157,6 +157,12 @@  config DRM_KMS_CMA_HELPER
 	help
 	  Choose this if you need the KMS CMA helper functions
 
+config DRM_GEM_SHMEM_HELPER
+	bool
+	depends on DRM
+	help
+	  Choose this if you need the GEM shmem helper functions
+
 config DRM_VM
 	bool
 	depends on DRM && MMU
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a6771cef85e2..c6798590799f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -24,6 +24,7 @@  drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
 drm-$(CONFIG_DRM_VM) += drm_vm.o
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
+drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
 drm-$(CONFIG_PCI) += ati_pcigart.o
 drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 drm-$(CONFIG_OF) += drm_of.o
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
new file mode 100644
index 000000000000..53181bb0e1cf
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -0,0 +1,676 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Noralf Trønnes
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for GEM objects backed by shmem buffers
+ * allocated using anonymous pageable memory.
+ */
+
+/**
+ * drm_gem_shmem_create - Allocate an object with the given size
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This function creates a shmem GEM object. The default cache mode is
+ * DRM_GEM_SHMEM_BO_CACHED. The &drm_driver->gem_create_object callback can be
+ * used override this.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
+{
+	struct drm_gem_shmem_object *shmem;
+	struct drm_gem_object *obj;
+	int ret;
+
+	size = PAGE_ALIGN(size);
+
+	if (dev->driver->gem_create_object)
+		obj = dev->driver->gem_create_object(dev, size);
+	else
+		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	shmem = to_drm_gem_shmem_obj(obj);
+
+	if (!dev->driver->gem_create_object)
+		shmem->cache_mode = DRM_GEM_SHMEM_BO_CACHED;
+
+	ret = drm_gem_object_init(dev, obj, size);
+	if (ret)
+		goto err_free;
+
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret)
+		goto err_release;
+
+	mutex_init(&shmem->pages_lock);
+	mutex_init(&shmem->vmap_lock);
+
+	return shmem;
+
+err_release:
+	drm_gem_object_release(obj);
+err_free:
+	kfree(shmem);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
+
+/**
+ * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
+ * @obj: GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself. Drivers using the shmem helpers should set this as
+ * their &drm_driver.gem_free_object_unlocked callback.
+ */
+void drm_gem_shmem_free_object(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	WARN_ON(shmem->vmap_use_count);
+
+	if (obj->import_attach) {
+		shmem->pages_use_count--;
+		drm_prime_gem_destroy(obj, shmem->sgt);
+		kvfree(shmem->pages);
+	}
+
+	WARN_ON(shmem->pages_use_count);
+
+	drm_gem_object_release(obj);
+	mutex_destroy(&shmem->pages_lock);
+	mutex_destroy(&shmem->vmap_lock);
+	kfree(shmem);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);
+
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
+	struct page **pages;
+
+	if (shmem->pages_use_count++ > 0)
+		return 0;
+
+	pages = drm_gem_get_pages(obj);
+	if (IS_ERR(pages)) {
+		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+		shmem->pages_use_count = 0;
+		return PTR_ERR(pages);
+	}
+
+	shmem->pages = pages;
+
+	return 0;
+}
+
+/**
+ * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function makes sure that backing pages exist for the shmem GEM object
+ * and increases the use count.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(&shmem->pages_lock);
+	if (ret)
+		return ret;
+	ret = drm_gem_shmem_get_pages_locked(shmem);
+	mutex_unlock(&shmem->pages_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_shmem_get_pages);
+
+static void drm_gem_shmem_put_pages_unlocked(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
+
+	if (WARN_ON_ONCE(!shmem->pages_use_count))
+		return;
+
+	if (--shmem->pages_use_count > 0)
+		return;
+
+	drm_gem_put_pages(obj, shmem->pages,
+			  shmem->pages_mark_dirty_on_put,
+			  shmem->pages_mark_accessed_on_put);
+	shmem->pages = NULL;
+}
+
+/**
+ * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function decreases the use count and puts the backing pages when use drops to zero.
+ */
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+{
+	mutex_lock(&shmem->pages_lock);
+	drm_gem_shmem_put_pages_unlocked(shmem);
+	mutex_unlock(&shmem->pages_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+
+static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
+	int ret;
+
+	if (shmem->vmap_use_count++ > 0)
+		return 0;
+
+	ret = drm_gem_shmem_get_pages(shmem);
+	if (ret)
+		goto err_zero_use;
+
+	if (obj->import_attach) {
+		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+	} else {
+		pgprot_t prot;
+
+		switch (shmem->cache_mode) {
+		case DRM_GEM_SHMEM_BO_UNKNOWN:
+			ret = -EINVAL;
+			goto err_put_pages;
+
+		case DRM_GEM_SHMEM_BO_WRITECOMBINED:
+			prot = pgprot_writecombine(PAGE_KERNEL);
+			break;
+
+		case DRM_GEM_SHMEM_BO_UNCACHED:
+			prot = pgprot_noncached(PAGE_KERNEL);
+			break;
+
+		case DRM_GEM_SHMEM_BO_CACHED:
+			prot = PAGE_KERNEL;
+			break;
+		}
+
+		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, prot);
+	}
+
+	if (!shmem->vaddr) {
+		DRM_DEBUG_KMS("Failed to vmap pages\n");
+		ret = -ENOMEM;
+		goto err_put_pages;
+	}
+
+	return 0;
+
+err_put_pages:
+	drm_gem_shmem_put_pages(shmem);
+err_zero_use:
+	shmem->vmap_use_count = 0;
+
+	return ret;
+}
+
+/**
+ * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function makes sure that a virtual address exists for the buffer backing
+ * the shmem GEM object.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(&shmem->vmap_lock);
+	if (ret)
+		return ret;
+	ret = drm_gem_shmem_vmap_locked(shmem);
+	mutex_unlock(&shmem->vmap_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_shmem_vmap);
+
+static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
+{
+	struct drm_gem_object *obj = &shmem->base;
+
+	if (WARN_ON_ONCE(!shmem->vmap_use_count))
+		return;
+
+	if (--shmem->vmap_use_count > 0)
+		return;
+
+	if (obj->import_attach)
+		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
+	else
+		vunmap(shmem->vaddr);
+
+	shmem->vaddr = NULL;
+	drm_gem_shmem_put_pages(shmem);
+}
+
+/**
+ * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function removes the virtual address when use count drops to zero.
+ */
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem)
+{
+	mutex_lock(&shmem->vmap_lock);
+	drm_gem_shmem_vunmap_locked(shmem);
+	mutex_unlock(&shmem->vmap_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+
+static struct drm_gem_shmem_object *
+drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
+				 struct drm_device *dev, size_t size,
+				 uint32_t *handle)
+{
+	struct drm_gem_shmem_object *shmem;
+	int ret;
+
+	shmem = drm_gem_shmem_create(dev, size);
+	if (IS_ERR(shmem))
+		return shmem;
+
+	/*
+	 * Allocate an id in the idr table where the obj is registered,
+	 * and the handle holds the id that userspace can see.
+	 */
+	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_put_unlocked(&shmem->base);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return shmem;
+}
+
+/**
+ * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
+ * @file: DRM file structure to create the dumb buffer for
+ * @dev: DRM device
+ * @args: IOCTL data
+ *
+ * This function computes the pitch of the dumb buffer and rounds it up to an
+ * integer number of bytes per pixel. Drivers for hardware that doesn't have
+ * any additional restrictions on the pitch can directly use this function as
+ * their &drm_driver.dumb_create callback.
+ *
+ * For hardware with additional restrictions, drivers can adjust the fields
+ * set up by userspace before calling into this function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
+			      struct drm_mode_create_dumb *args)
+{
+	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+	struct drm_gem_shmem_object *shmem;
+
+	if (!args->pitch || !args->size) {
+		args->pitch = min_pitch;
+		args->size = args->pitch * args->height;
+	} else {
+		/* ensure sane minimum values */
+		if (args->pitch < min_pitch)
+			args->pitch = min_pitch;
+		if (args->size < args->pitch * args->height)
+			args->size = args->pitch * args->height;
+	}
+
+	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
+
+	return PTR_ERR_OR_ZERO(shmem);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
+
+static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	/* We don't use vmf->pgoff since that has the fake offset: */
+	pgoff_t pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+	loff_t num_pages = obj->size >> PAGE_SHIFT;
+	struct page *page;
+
+	if (pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
+		return VM_FAULT_SIGBUS;
+
+	page = shmem->pages[pgoff];
+
+	return vmf_insert_page(vma, vmf->address, page);
+}
+
+static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	drm_gem_shmem_put_pages(shmem);
+	drm_gem_vm_close(vma);
+}
+
+const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+	.fault = drm_gem_shmem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_shmem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
+
+static int drm_gem_shmem_mmap_obj(struct drm_gem_shmem_object *shmem,
+				  struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_shmem_get_pages(shmem);
+	if (ret)
+		goto err_close;
+
+	/* VM_PFNMAP was set by drm_gem_mmap() */
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+
+	switch (shmem->cache_mode) {
+	case DRM_GEM_SHMEM_BO_UNKNOWN:
+		ret = -EINVAL;
+		goto err_put_pages;
+
+	case DRM_GEM_SHMEM_BO_WRITECOMBINED:
+		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+		break;
+
+	case DRM_GEM_SHMEM_BO_UNCACHED:
+		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+		break;
+
+	case DRM_GEM_SHMEM_BO_CACHED:
+		/*
+		 * Shunt off cached objs to shmem file so they have their own
+		 * address_space (so unmap_mapping_range does what we want,
+		 * in particular in the case of mmap'd dmabufs)
+		 */
+		fput(vma->vm_file);
+		get_file(shmem->base.filp);
+		vma->vm_pgoff = 0;
+		vma->vm_file  = shmem->base.filp;
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+		break;
+	}
+
+	return 0;
+
+err_put_pages:
+	drm_gem_shmem_put_pages(shmem);
+err_close:
+	drm_gem_vm_close(vma);
+
+	return ret;
+}
+
+/**
+ * drm_gem_shmem_mmap - Memory-map a shmem GEM object
+ * @filp: File object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function implements an augmented version of the GEM DRM file mmap
+ * operation for shmem objects. Drivers which employ the shmem helpers should
+ * use this function as their &file_operations.mmap handler in the DRM device file's
+ * file_operations structure.
+ *
+ * Instead of directly referencing this function, drivers should use the
+ * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_gem_shmem_object *shmem;
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
+
+	return drm_gem_shmem_mmap_obj(shmem, vma);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
+
+static const char * const cache_mode_str[] = {
+	[DRM_GEM_SHMEM_BO_UNKNOWN] = "unknown",
+	[DRM_GEM_SHMEM_BO_UNCACHED] = "uncached",
+	[DRM_GEM_SHMEM_BO_CACHED] = "cached",
+	[DRM_GEM_SHMEM_BO_WRITECOMBINED] = "writecombined",
+};
+
+/**
+ * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ *
+ * This function can be used as the &drm_driver.gem_print_info callback.
+ */
+void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
+			      const struct drm_gem_object *obj)
+{
+	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	drm_printf_indent(p, indent, "cachemode=%s\n", cache_mode_str[shmem->cache_mode]);
+	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
+	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
+	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+}
+EXPORT_SYMBOL(drm_gem_shmem_print_info);
+
+/**
+ * drm_gem_shmem_prime_pin - Pin backing pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function makes sure the backing pages are pinned in memory while the
+ * buffer is exported. Drivers using the shmem helpers should set this as their
+ * DRM driver's &drm_driver.gem_prime_pin callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_prime_pin(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	return drm_gem_shmem_get_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_prime_pin);
+
+/**
+ * drm_gem_shmem_prime_unpin - Unpin backing pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function removes the requirement that the backing pages are pinned in
+ * memory. Drivers using the shmem helpers should set this as their DRM driver's
+ * &drm_driver.gem_prime_unpin callback.
+ */
+void drm_gem_shmem_prime_unpin(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	drm_gem_shmem_put_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_prime_unpin);
+
+/**
+ * drm_gem_shmem_prime_get_sg_table - Provide a scatter/gather table of pinned
+ *                                    pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function exports a scatter/gather table suitable for PRIME usage by
+ * calling the standard DMA mapping API. Drivers using the shmem helpers should
+ * set this as their &drm_driver.gem_prime_get_sg_table callback.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or NULL on failure.
+ */
+struct sg_table *drm_gem_shmem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_get_sg_table);
+
+/**
+ * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
+ *                 another driver's scatter/gather table of pinned pages
+ * @dev: Device to import into
+ * @attach: DMA-BUF attachment
+ * @sgt: Scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table exported via DMA-BUF by
+ * another driver. Drivers that use the shmem helpers should set this as their
+ * &drm_driver.gem_prime_import_sg_table callback.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
+				    struct dma_buf_attachment *attach,
+				    struct sg_table *sgt)
+{
+	size_t size = PAGE_ALIGN(attach->dmabuf->size);
+	size_t npages = size >> PAGE_SHIFT;
+	struct drm_gem_shmem_object *shmem;
+	int ret;
+
+	shmem = drm_gem_shmem_create(dev, size);
+	if (IS_ERR(shmem))
+		return ERR_CAST(shmem);
+
+	shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+	if (!shmem->pages) {
+		ret = -ENOMEM;
+		goto err_free_gem;
+	}
+
+	ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
+	if (ret < 0)
+		goto err_free_array;
+
+	shmem->sgt = sgt;
+	shmem->cache_mode = DRM_GEM_SHMEM_BO_UNKNOWN;
+	shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
+
+	DRM_DEBUG_PRIME("size = %zu\n", size);
+
+	return &shmem->base;
+
+err_free_array:
+	kvfree(shmem->pages);
+err_free_gem:
+	drm_gem_object_put_unlocked(&shmem->base);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
+
+/**
+ * drm_gem_shmem_prime_mmap - Memory-map an exported shmem GEM object
+ * @obj: GEM object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function maps a buffer imported via DRM PRIME into a userspace
+ * process's address space. Drivers that use the shmem helpers should set this
+ * as their &drm_driver.gem_prime_mmap callback.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	int ret;
+
+	ret = drm_gem_mmap_obj(obj, obj->size, vma);
+	if (ret)
+		return ret;
+
+	return drm_gem_shmem_mmap_obj(shmem, vma);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_mmap);
+
+/**
+ * drm_gem_shmem_prime_vmap - Map a shmem GEM object into the kernel's virtual
+ *                            address space
+ * @obj: GEM object
+ *
+ * This function maps a buffer exported via DRM PRIME into the kernel's
+ * virtual address space. Drivers using the shmem helpers should set this as
+ * their DRM driver's &drm_driver.gem_prime_vmap callback.
+ *
+ * Returns:
+ * The kernel virtual address of the shmem GEM object's backing store or NULL on error.
+ */
+void *drm_gem_shmem_prime_vmap(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	drm_gem_shmem_vmap(shmem);
+
+	return shmem->vaddr;
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_vmap);
+
+/**
+ * drm_gem_shmem_prime_vunmap - Unmap a shmem GEM object from the kernel's
+ *                              virtual address space
+ * @obj: GEM object
+ * @vaddr: kernel virtual address where the shmem GEM object was mapped
+ *
+ * This function removes a buffer exported via DRM PRIME from the kernel's
+ * virtual address space. Drivers using the shmem helpers should set this as
+ * their &drm_driver.gem_prime_vunmap callback.
+ */
+void drm_gem_shmem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	drm_gem_shmem_vunmap(shmem);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_vunmap);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
new file mode 100644
index 000000000000..8dfcb2c3d667
--- /dev/null
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -0,0 +1,198 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DRM_GEM_SHMEM_HELPER_H__
+#define __DRM_GEM_SHMEM_HELPER_H__
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+
+struct dma_buf_attachment;
+struct drm_mode_create_dumb;
+struct drm_printer;
+struct sg_table;
+
+/**
+ * enum drm_gem_shmem_cache_mode - shmem buffer cache mode
+ */
+enum drm_gem_shmem_cache_mode {
+	/**
+	 * @DRM_GEM_SHMEM_BO_UNKNOWN:
+	 *
+	 * Cache mode is not known. This is the case when importing a buffer.
+	 */
+	DRM_GEM_SHMEM_BO_UNKNOWN = 0,
+
+	/**
+	 * @DRM_GEM_SHMEM_BO_UNCACHED: Buffer is uncached.
+	 */
+	DRM_GEM_SHMEM_BO_UNCACHED,
+
+	/**
+	 * @DRM_GEM_SHMEM_BO_CACHED: Buffer is cached.
+	 */
+	DRM_GEM_SHMEM_BO_CACHED,
+
+	/**
+	 * @DRM_GEM_SHMEM_BO_WRITECOMBINED: Buffer is uncached with writes combined.
+	 */
+	DRM_GEM_SHMEM_BO_WRITECOMBINED,
+};
+
+/**
+ * struct drm_gem_shmem_object - GEM object backed by shmem
+ */
+struct drm_gem_shmem_object {
+	/**
+	 * @base: Base GEM object
+	 */
+	struct drm_gem_object base;
+
+	/**
+	 * @cache_mode: Cache mode
+	 */
+	enum drm_gem_shmem_cache_mode cache_mode;
+
+	/**
+	 * @pages_lock: Protects the page table and use count
+	 */
+	struct mutex pages_lock;
+
+	/**
+	 * @pages: Page table
+	 */
+	struct page **pages;
+
+	/**
+	 * @pages_use_count:
+	 *
+	 * Reference count on the pages table.
+	 * The pages are put when the count reaches zero.
+	 */
+	unsigned int pages_use_count;
+
+	/**
+	 * @pages_mark_dirty_on_put:
+	 *
+	 * Mark pages as dirty when they are put.
+	 */
+	unsigned int pages_mark_dirty_on_put    : 1;
+
+	/**
+	 * @pages_mark_accessed_on_put:
+	 *
+	 * Mark pages as accessed when they are put.
+	 */
+	unsigned int pages_mark_accessed_on_put : 1;
+
+	/**
+	 * @sgt: Scatter/gather table for imported PRIME buffers
+	 */
+	struct sg_table *sgt;
+
+	/**
+	 * @vmap_lock: Protects the vmap address and use count
+	 */
+	struct mutex vmap_lock;
+
+	/**
+	 * @vaddr: Kernel virtual address of the backing memory
+	 */
+	void *vaddr;
+
+	/**
+	 * @vmap_use_count:
+	 *
+	 * Reference count on the virtual address.
+	 * The address is unmapped when the count reaches zero.
+	 */
+	unsigned int vmap_use_count;
+};
+
+#define to_drm_gem_shmem_obj(obj) \
+	container_of(obj, struct drm_gem_shmem_object, base)
+
+/**
+ * DEFINE_DRM_GEM_SHMEM_FOPS() - Macro to generate file operations for shmem drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for shmem based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_SHMEM_FOPS(name) \
+	static const struct file_operations name = {\
+		.owner		= THIS_MODULE,\
+		.open		= drm_open,\
+		.release	= drm_release,\
+		.unlocked_ioctl	= drm_ioctl,\
+		.compat_ioctl	= drm_compat_ioctl,\
+		.poll		= drm_poll,\
+		.read		= drm_read,\
+		.llseek		= noop_llseek,\
+		.mmap		= drm_gem_shmem_mmap, \
+	}
+
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
+void drm_gem_shmem_free_object(struct drm_gem_object *obj);
+
+int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem);
+
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
+			      struct drm_mode_create_dumb *args);
+
+int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
+
+void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
+			      const struct drm_gem_object *obj);
+
+int drm_gem_shmem_prime_pin(struct drm_gem_object *obj);
+void drm_gem_shmem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *drm_gem_shmem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
+				    struct dma_buf_attachment *attach,
+				    struct sg_table *sgt);
+int drm_gem_shmem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+void *drm_gem_shmem_prime_vmap(struct drm_gem_object *obj);
+void drm_gem_shmem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+/**
+ * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
+ *
+ * This macro provides a shortcut for setting the shmem GEM operations in
+ * the &drm_driver structure.
+ */
+#define DRM_GEM_SHMEM_DRIVER_OPS \
+	.gem_free_object_unlocked = drm_gem_shmem_free_object, \
+	.gem_print_info		= drm_gem_shmem_print_info, \
+	.gem_vm_ops		= &drm_gem_shmem_vm_ops, \
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
+	.gem_prime_pin		= drm_gem_shmem_prime_pin, \
+	.gem_prime_unpin	= drm_gem_shmem_prime_unpin, \
+	.gem_prime_import	= drm_gem_prime_import, \
+	.gem_prime_export	= drm_gem_prime_export, \
+	.gem_prime_get_sg_table	= drm_gem_shmem_prime_get_sg_table, \
+	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
+	.gem_prime_vmap		= drm_gem_shmem_prime_vmap, \
+	.gem_prime_vunmap	= drm_gem_shmem_prime_vunmap, \
+	.gem_prime_mmap		= drm_gem_shmem_prime_mmap, \
+	.dumb_create		= drm_gem_shmem_dumb_create
+
+#endif /* __DRM_GEM_SHMEM_HELPER_H__ */