@@ -511,6 +511,7 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
+
if (IS_ERR(a5xx_gpu->pm4_bo)) {
ret = PTR_ERR(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
@@ -518,6 +519,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
ret);
return ret;
}
+
+ msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
}
if (!a5xx_gpu->pfp_bo) {
@@ -531,6 +534,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
ret);
return ret;
}
+
+ msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
}
gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@ -1219,10 +1224,10 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
&dumper->bo, &dumper->iova);
- if (IS_ERR(dumper->ptr))
- return PTR_ERR(dumper->ptr);
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
- return 0;
+ return PTR_ERR_OR_ZERO(dumper->ptr);
}
static int a5xx_crashdumper_run(struct msm_gpu *gpu,
@@ -300,6 +300,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
if (IS_ERR(ptr))
return;
+ msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");
+
while (cmds_size > 0) {
int i;
uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
@@ -245,6 +245,8 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
if (IS_ERR(ptr))
return PTR_ERR(ptr);
+ msm_gem_object_set_name(bo, "preempt");
+
a5xx_gpu->preempt_bo[ring->id] = bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
@@ -300,6 +300,8 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
return ret;
}
+
+ msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
}
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -1083,6 +1083,8 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
return PTR_ERR(data);
}
+ msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
+
msm_host->tx_size = msm_host->tx_gem_obj->size;
return 0;
@@ -325,6 +325,8 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
+
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
@@ -237,6 +237,8 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
return ERR_CAST(bo);
}
+ msm_gem_object_set_name(bo, "stolenfb");
+
fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
if (IS_ERR(fb)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
@@ -491,7 +491,7 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->pitch = align_pitch(args->width, args->bpp);
args->size = PAGE_ALIGN(args->pitch * args->height);
return msm_gem_new_handle(dev, file, args->size,
- MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+ MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -794,7 +794,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
- seq_printf(m, " %08zu%9s\n", obj->size, madv);
+ seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
if (!list_empty(&msm_obj->vmas)) {
@@ -833,7 +833,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
int count = 0;
size_t size = 0;
- seq_puts(m, " flags id ref offset kaddr size madv\n");
+ seq_puts(m, " flags id ref offset kaddr size madv name\n");
list_for_each_entry(msm_obj, list, mm_list) {
struct drm_gem_object *obj = &msm_obj->base;
seq_puts(m, " ");
@@ -1147,3 +1147,16 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
else
drm_gem_object_put_unlocked(bo);
}
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(bo);
+ va_list ap;
+
+ if (!fmt)
+ return;
+
+ va_start(ap, fmt);
+ vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+ va_end(ap);
+}
@@ -93,6 +93,8 @@ struct msm_gem_object {
*/
struct drm_mm_node *vram_node;
struct mutex lock; /* Protects resources associated with bo */
+
+ char name[32]; /* Identifier to print for the debugfs files */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -930,6 +930,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
+ msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
+
if (nr_rings > ARRAY_SIZE(gpu->rb)) {
DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
ARRAY_SIZE(gpu->rb));
@@ -45,6 +45,9 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->start = 0;
goto fail;
}
+
+ msm_gem_object_set_name(ring->bo, "ring%d", id);
+
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
ring->cur = ring->start;
For debugging purposes it is useful to assign descriptions to buffers so
that we know what they are used for. Add a field to the buffer object and
use that to name the various kernel-side allocations, which ends up looking
like this in /d/dri/X/gem:

   flags       id ref  offset   kaddr            size     madv      name
   00040000: I  0 ( 1) 00000000 0000000070b79eca 00004096           memptrs
      vmas: [gpu: 01000000,mapped,inuse=1]
   00020000: I  0 ( 1) 00000000 0000000031ed4074 00032768           ring0

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
---
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c     | 11 ++++++++---
 drivers/gpu/drm/msm/adreno/a5xx_power.c   |  2 ++
 drivers/gpu/drm/msm/adreno/a5xx_preempt.c |  2 ++
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c     |  2 ++
 drivers/gpu/drm/msm/dsi/dsi_host.c        |  2 ++
 drivers/gpu/drm/msm/msm_drv.h             |  2 ++
 drivers/gpu/drm/msm/msm_fb.c              |  2 ++
 drivers/gpu/drm/msm/msm_gem.c             | 19 ++++++++++++++++---
 drivers/gpu/drm/msm/msm_gem.h             |  2 ++
 drivers/gpu/drm/msm/msm_gpu.c             |  2 ++
 drivers/gpu/drm/msm/msm_ringbuffer.c      |  3 +++
 11 files changed, 43 insertions(+), 6 deletions(-)
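As a usage sketch only (not part of the patch): a driver-private allocation
would typically be tagged right after it is created, mirroring what the
ringbuffer and crashdumper hunks above do. The msm_gem_kernel_new() call and
the gpu->dev/gpu->aspace arguments follow the pattern already visible in
those hunks; the "scratch%d" name, SZ_4K size, and surrounding variables are
hypothetical. Names longer than the 32-byte name[] added in msm_gem.h are
silently truncated by vsnprintf().

	struct drm_gem_object *bo;
	uint64_t iova;
	void *ptr;

	/* Allocate a kernel-side buffer the same way the ringbuffer path does */
	ptr = msm_gem_kernel_new(gpu->dev, SZ_4K, MSM_BO_WC,
				 gpu->aspace, &bo, &iova);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	/* printf-style identifier shown in the debugfs "name" column */
	msm_gem_object_set_name(bo, "scratch%d", id);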