
[1/5] drm/i915/dmabuf: dmabuf cleanup

Message ID 20200422212519.36276-2-michael.j.ruhl@intel.com (mailing list archive)
State New, archived
Series [1/5] drm/i915/dmabuf: dmabuf cleanup

Commit Message

Michael J. Ruhl April 22, 2020, 9:25 p.m. UTC
Minor cleanup of some variable names and checkpatch items to make
the upcoming patches a little easier to review.

Normalize struct sg_table variables to sgt.
Normalize struct dma_buf_attachment variables to attach.
Fix checkpatch issues: sizeof() and !NULL updates (see the short
example below the diffstat).

Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 58 +++++++++++-----------
 1 file changed, 29 insertions(+), 29 deletions(-)
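
For readers unfamiliar with the checkpatch items called out above, the two
idioms being applied are sizeof(*ptr) in allocations (so the allocation size
stays correct if the variable's type ever changes) and plain !ptr truth tests
instead of explicit == NULL comparisons. The following is a minimal,
standalone userspace sketch of the same pattern using malloc(); the struct
and names are illustrative only and are not part of the i915 code.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative type only; stands in for struct sg_table in the patch. */
struct example_table {
	unsigned int nents;
};

int main(void)
{
	/*
	 * Preferred: sizeof(*tbl) rather than sizeof(struct example_table),
	 * so the allocation tracks the variable's type automatically.
	 */
	struct example_table *tbl = malloc(sizeof(*tbl));

	/* Preferred: !tbl rather than tbl == NULL. */
	if (!tbl)
		return -1;

	tbl->nents = 0;
	printf("allocated table with %u entries\n", tbl->nents);
	free(tbl);
	return 0;
}

The same two substitutions appear throughout the diff below, e.g.
kmalloc(sizeof(*sgt), ...), if (!sgt) and if (!obj).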

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 7db5a793739d..0d9124ad549a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -17,11 +17,11 @@  static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 	return to_intel_bo(buf->priv);
 }
 
-static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attach,
 					     enum dma_data_direction dir)
 {
-	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-	struct sg_table *st;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);
+	struct sg_table *sgt;
 	struct scatterlist *src, *dst;
 	int ret, i;
 
@@ -30,54 +30,54 @@  static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 		goto err;
 
 	/* Copy sg so that we make an independent mapping */
-	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (st == NULL) {
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
 		ret = -ENOMEM;
 		goto err_unpin_pages;
 	}
 
-	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+	ret = sg_alloc_table(sgt, obj->mm.pages->nents, GFP_KERNEL);
 	if (ret)
 		goto err_free;
 
 	src = obj->mm.pages->sgl;
-	dst = st->sgl;
+	dst = sgt->sgl;
 	for (i = 0; i < obj->mm.pages->nents; i++) {
 		sg_set_page(dst, sg_page(src), src->length, 0);
 		dst = sg_next(dst);
 		src = sg_next(src);
 	}
 
-	if (!dma_map_sg_attrs(attachment->dev,
-			      st->sgl, st->nents, dir,
+	if (!dma_map_sg_attrs(attach->dev,
+			      sgt->sgl, sgt->nents, dir,
 			      DMA_ATTR_SKIP_CPU_SYNC)) {
 		ret = -ENOMEM;
 		goto err_free_sg;
 	}
 
-	return st;
+	return sgt;
 
 err_free_sg:
-	sg_free_table(st);
+	sg_free_table(sgt);
 err_free:
-	kfree(st);
+	kfree(sgt);
 err_unpin_pages:
 	i915_gem_object_unpin_pages(obj);
 err:
 	return ERR_PTR(ret);
 }
 
-static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
-				   struct sg_table *sg,
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+				   struct sg_table *sgt,
 				   enum dma_data_direction dir)
 {
-	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);
 
-	dma_unmap_sg_attrs(attachment->dev,
-			   sg->sgl, sg->nents, dir,
+	dma_unmap_sg_attrs(attach->dev,
+			   sgt->sgl, sgt->nents, dir,
 			   DMA_ATTR_SKIP_CPU_SYNC);
-	sg_free_table(sg);
-	kfree(sg);
+	sg_free_table(sgt);
+	kfree(sgt);
 
 	i915_gem_object_unpin_pages(obj);
 }
@@ -194,25 +194,25 @@  struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
-	struct sg_table *pages;
+	struct sg_table *sgt;
 	unsigned int sg_page_sizes;
 
-	pages = dma_buf_map_attachment(obj->base.import_attach,
-				       DMA_BIDIRECTIONAL);
-	if (IS_ERR(pages))
-		return PTR_ERR(pages);
+	sgt = dma_buf_map_attachment(obj->base.import_attach,
+				     DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt))
+		return PTR_ERR(sgt);
 
-	sg_page_sizes = i915_sg_page_sizes(pages->sgl);
+	sg_page_sizes = i915_sg_page_sizes(sgt->sgl);
 
-	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, sgt, sg_page_sizes);
 
 	return 0;
 }
 
 static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
-					     struct sg_table *pages)
+					     struct sg_table *sgt)
 {
-	dma_buf_unmap_attachment(obj->base.import_attach, pages,
+	dma_buf_unmap_attachment(obj->base.import_attach, sgt,
 				 DMA_BIDIRECTIONAL);
 }
 
@@ -250,7 +250,7 @@  struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	get_dma_buf(dma_buf);
 
 	obj = i915_gem_object_alloc();
-	if (obj == NULL) {
+	if (!obj) {
 		ret = -ENOMEM;
 		goto fail_detach;
 	}