drm/msm: Move memptrs to msm_gpu

When we move to multiple ringbuffers we're going to store the data in the
memptrs on a per-ring basis. In order to prepare for that, move the current
memptrs from the adreno namespace into msm_gpu. This is way cleaner and
immediately lets us kill off some sub-functions, so there is much less cost
later when we do move to per-ring structs.

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
---
 drivers/gpu/drm/msm/adreno/a3xx_gpu.c   |  1 -
 drivers/gpu/drm/msm/adreno/a4xx_gpu.c   |  1 -
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c   |  8 ++---
 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 52 ++++++++-------------------------
 drivers/gpu/drm/msm/adreno/adreno_gpu.h | 16 ----------
 drivers/gpu/drm/msm/msm_gpu.c           | 30 +++++++++++++++++--
 drivers/gpu/drm/msm/msm_gpu.h           | 15 +++++++++-
 7 files changed, 55 insertions(+), 68 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -444,7 +444,6 @@ static void a3xx_dump(struct msm_gpu *gpu)
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover,
- .last_fence = adreno_last_fence,
.submit = adreno_submit,
.flush = adreno_flush,
.irq = a3xx_irq,
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -532,7 +532,6 @@ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
.pm_suspend = a4xx_pm_suspend,
.pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
- .last_fence = adreno_last_fence,
.submit = adreno_submit,
.flush = adreno_flush,
.irq = a4xx_irq,
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -98,7 +98,6 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = gpu->rb;
unsigned int i, ibs = 0;
@@ -125,8 +124,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT7(ring, CP_EVENT_WRITE, 4);
OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
- OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence)));
- OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence)));
+ OUT_RING(ring, lower_32_bits(rbmemptr(gpu, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(gpu, fence)));
OUT_RING(ring, submit->fence->seqno);
gpu->funcs->flush(gpu);
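
The two OUT_RING() words above split the 64-bit GPU address of the fence field
so the CP_EVENT_WRITE packet knows where to land the seqno once the cache
flush completes. A standalone model of that address split (hypothetical iova
value, userspace re-implementations of the kernel helpers, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the kernel's lower_32_bits()/upper_32_bits() */
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t fence_iova = 0x100001004ULL;	/* hypothetical rbmemptr(gpu, fence) */

	/* the ring packet carries the address as two consecutive dwords */
	printf("lo dword: 0x%08x\n", lower_32_bits(fence_iova));
	printf("hi dword: 0x%08x\n", upper_32_bits(fence_iova));
	return 0;
}
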
@@ -804,7 +803,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
struct msm_drm_private *priv = dev->dev_private;
dev_err(dev->dev, "gpu fault fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- gpu->funcs->last_fence(gpu),
+ gpu->memptrs->fence,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
@@ -992,7 +991,6 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
.pm_suspend = a5xx_pm_suspend,
.pm_resume = a5xx_pm_resume,
.recover = a5xx_recover,
- .last_fence = adreno_last_fence,
.submit = a5xx_submit,
.flush = adreno_flush,
.irq = a5xx_irq,
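
With the fence living in a structure owned by msm_gpu, the per-chip
.last_fence hook dropped from the three funcs tables above becomes redundant:
every caller can read gpu->memptrs->fence directly. A toy sketch of that
simplification (plain C with hypothetical names, not the driver structs):

#include <stdint.h>
#include <stdio.h>

struct memptrs { volatile uint32_t fence; };

struct gpu {
	struct memptrs *memptrs;
	/* before this patch: uint32_t (*last_fence)(struct gpu *gpu); */
};

int main(void)
{
	struct memptrs shared = { .fence = 42 };
	struct gpu gpu = { .memptrs = &shared };

	/* the indirect call is replaced by a plain field read */
	printf("last completed fence: %u\n", gpu.memptrs->fence);
	return 0;
}
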
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -82,8 +82,8 @@ int adreno_hw_init(struct msm_gpu *gpu)
gpu->rb->cur = gpu->rb->start;
/* reset completed fence seqno: */
- adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
- adreno_gpu->memptrs->rptr = 0;
+ gpu->memptrs->fence = gpu->fctx->completed_fence;
+ gpu->memptrs->rptr = 0;
/* Setup REG_CP_RB_CNTL: */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
@@ -98,8 +98,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
if (!adreno_is_a430(adreno_gpu)) {
adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- REG_ADRENO_CP_RB_RPTR_ADDR_HI,
- rbmemptr(adreno_gpu, rptr));
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu, rptr));
}
return 0;
@@ -113,17 +112,13 @@ static uint32_t get_wptr(struct msm_ringbuffer *ring)
/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
if (adreno_is_a430(adreno_gpu))
- return adreno_gpu->memptrs->rptr = adreno_gpu_read(
+ return gpu->memptrs->rptr = adreno_gpu_read(
adreno_gpu, REG_ADRENO_CP_RB_RPTR);
else
- return adreno_gpu->memptrs->rptr;
-}
-
-uint32_t adreno_last_fence(struct msm_gpu *gpu)
-{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- return adreno_gpu->memptrs->fence;
+ return gpu->memptrs->rptr;
}
void adreno_recover(struct msm_gpu *gpu)
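
get_rptr() keeps its small twist: a430 never writes the read pointer back to
memory, so it has to be polled from the register and the cached copy
refreshed. A standalone model of that behavior (the register read is a
stand-in for adreno_gpu_read(), values are made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct memptrs { volatile uint32_t rptr; };

static uint32_t read_rptr_register(void)
{
	return 64;	/* pretend register value */
}

static uint32_t get_rptr(struct memptrs *m, bool hw_updates_memory)
{
	if (!hw_updates_memory)
		return m->rptr = read_rptr_register();	/* refresh the cache */
	return m->rptr;		/* hardware keeps the memory copy current */
}

int main(void)
{
	struct memptrs m = { .rptr = 16 };

	printf("a3xx-style: %u\n", get_rptr(&m, true));	/* 16 */
	printf("a430-style: %u\n", get_rptr(&m, false));	/* 64, cache updated */
	return 0;
}
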
@@ -188,7 +183,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
- OUT_RING(ring, rbmemptr(adreno_gpu, fence));
+ OUT_RING(ring, rbmemptr(gpu, fence));
OUT_RING(ring, submit->fence->seqno);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
@@ -261,7 +256,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
+ seq_printf(m, "fence: %d/%d\n", gpu->memptrs->fence,
gpu->fctx->last_fence);
seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
@@ -296,7 +291,7 @@ void adreno_dump_info(struct msm_gpu *gpu)
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
+ printk("fence: %d/%d\n", gpu->memptrs->fence,
gpu->fctx->last_fence);
printk("rptr: %d\n", get_rptr(adreno_gpu));
printk("rb wptr: %d\n", get_wptr(gpu->rb));
@@ -385,40 +380,17 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
}
ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
- if (ret) {
+ if (ret)
dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
adreno_gpu->info->pfpfw, ret);
- return ret;
- }
-
- adreno_gpu->memptrs = msm_gem_kernel_new(drm,
- sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace,
- &adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
-
- if (IS_ERR(adreno_gpu->memptrs)) {
- ret = PTR_ERR(adreno_gpu->memptrs);
- adreno_gpu->memptrs = NULL;
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
- }
return ret;
}
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
- struct msm_gpu *gpu = &adreno_gpu->base;
-
- if (adreno_gpu->memptrs_bo) {
- if (adreno_gpu->memptrs)
- msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
-
- if (adreno_gpu->memptrs_iova)
- msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
-
- drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
- }
release_firmware(adreno_gpu->pm4);
release_firmware(adreno_gpu->pfp);
- msm_gpu_cleanup(gpu);
+ msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -82,14 +82,6 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev);
-#define rbmemptr(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
-struct adreno_rbmemptrs {
- volatile uint32_t rptr;
- volatile uint32_t fence;
-};
-
struct adreno_gpu {
struct msm_gpu base;
struct adreno_rev rev;
@@ -104,13 +96,6 @@ struct adreno_gpu {
/* firmware: */
const struct firmware *pm4, *pfp;
- /* ringbuffer rptr/wptr: */
- // TODO should this be in msm_ringbuffer? I think it would be
- // different for z180..
- struct adreno_rbmemptrs *memptrs;
- struct drm_gem_object *memptrs_bo;
- uint64_t memptrs_iova;
-
/*
* Register offsets are different between some GPUs.
* GPU specific offsets will be exported by GPU specific
@@ -197,7 +182,6 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu)
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
-uint32_t adreno_last_fence(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -228,7 +228,7 @@ static void recover_worker(struct work_struct *work)
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
struct drm_device *dev = gpu->dev;
struct msm_gem_submit *submit;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ uint32_t fence = gpu->memptrs->fence;
msm_update_fence(gpu->fctx, fence + 1);
@@ -281,7 +281,7 @@ static void hangcheck_handler(unsigned long data)
struct msm_gpu *gpu = (struct msm_gpu *)data;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ uint32_t fence = gpu->memptrs->fence;
if (fence != gpu->hangcheck_fence) {
/* some progress has been made.. ya! */
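
The hangcheck timer only needs the last fence the GPU wrote back: if it
matches the value sampled on the previous tick while work is still queued,
the GPU is presumed hung. A minimal model of that progress test (standalone
sketch with hypothetical values, mirroring the logic in hangcheck_handler):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool looks_hung(uint32_t fence, uint32_t hangcheck_fence,
		       uint32_t last_submitted)
{
	if (fence != hangcheck_fence)
		return false;			/* progress was made */
	return fence < last_submitted;		/* no progress, work pending */
}

int main(void)
{
	printf("%d\n", looks_hung(8, 7, 10));	/* 0: seqno advanced since last tick */
	printf("%d\n", looks_hung(8, 8, 10));	/* 1: stuck with submits outstanding */
	printf("%d\n", looks_hung(8, 8, 8));	/* 0: idle, nothing to retire */
	return 0;
}
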
@@ -449,7 +449,7 @@ static void retire_worker(struct work_struct *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
struct drm_device *dev = gpu->dev;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ uint32_t fence = gpu->memptrs->fence;
msm_update_fence(gpu->fctx, fence);
@@ -689,6 +689,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
+ gpu->memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs),
+ MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
+ &gpu->memptrs_iova);
+
+ if (IS_ERR(gpu->memptrs)) {
+ ret = PTR_ERR(gpu->memptrs);
+ gpu->memptrs = NULL;
+ dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+ goto fail;
+ }
+
/* Create ringbuffer: */
gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
if (IS_ERR(gpu->rb)) {
@@ -701,6 +712,12 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return 0;
fail:
+ if (gpu->memptrs_bo) {
+ msm_gem_put_vaddr(gpu->memptrs_bo);
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ }
+
platform_set_drvdata(pdev, NULL);
return ret;
}
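
msm_gem_kernel_new() follows the usual kernel convention of encoding failures
in the returned pointer, which is why the new memptrs allocation is checked
with IS_ERR()/PTR_ERR() rather than against NULL. A userspace model of that
idiom (simplified re-implementations of the macros, not the kernel versions):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(void *)(ptr) >= (uintptr_t)-MAX_ERRNO)

static void *fake_kernel_new(int fail)
{
	static uint32_t memptrs[2];		/* rptr + fence */
	return fail ? ERR_PTR(-ENOMEM) : memptrs;
}

int main(void)
{
	void *p = fake_kernel_new(1);

	if (IS_ERR(p))
		printf("could not allocate memptrs: %ld\n", PTR_ERR(p));	/* -12 */
	return 0;
}
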
@@ -718,6 +735,13 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb);
}
+
+ if (gpu->memptrs_bo) {
+ msm_gem_put_vaddr(gpu->memptrs_bo);
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ }
+
if (gpu->aspace) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
NULL, 0);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -59,7 +59,6 @@ struct msm_gpu_funcs {
struct msm_file_private *ctx);
void (*flush)(struct msm_gpu *gpu);
irqreturn_t (*irq)(struct msm_gpu *irq);
- uint32_t (*last_fence)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
@@ -68,6 +67,14 @@ struct msm_gpu_funcs {
#endif
};
+#define rbmemptr(gpu, member) \
+ ((gpu)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
+
+struct msm_rbmemptrs {
+ volatile uint32_t rptr;
+ volatile uint32_t fence;
+};
+
struct msm_gpu {
const char *name;
struct drm_device *dev;
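
rbmemptr() is just iova arithmetic: the GPU-visible address of a field is the
base iova of the shared buffer plus the field's offset inside struct
msm_rbmemptrs. A standalone demonstration (hypothetical base address, with
the iova passed in directly instead of read from the gpu struct):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct msm_rbmemptrs {
	volatile uint32_t rptr;
	volatile uint32_t fence;
};

/* same shape as the kernel macro, minus the gpu pointer */
#define rbmemptr(iova, member) \
	((iova) + offsetof(struct msm_rbmemptrs, member))

int main(void)
{
	uint64_t memptrs_iova = 0x10000000ULL;	/* hypothetical GPU address */

	printf("rptr  at 0x%llx\n", (unsigned long long)rbmemptr(memptrs_iova, rptr));
	printf("fence at 0x%llx\n", (unsigned long long)rbmemptr(memptrs_iova, fence));
	return 0;
}
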
@@ -130,11 +137,15 @@ struct msm_gpu {
struct work_struct recover_work;
struct list_head submit_list;
+
+ struct msm_rbmemptrs *memptrs;
+ struct drm_gem_object *memptrs_bo;
+ uint64_t memptrs_iova;
};
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
- return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
+ return gpu->fctx->last_fence > gpu->memptrs->fence;
}
/* Perf-Counters:
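
msm_gpu_active() now compares the last issued seqno (fctx->last_fence)
against the last seqno the GPU has written back: the GPU is busy while
issuance is ahead of retirement. A tiny model (mirrors the unsigned `>`
compare used above, which assumes the seqnos have not wrapped):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool gpu_active(uint32_t last_issued, uint32_t last_retired)
{
	return last_issued > last_retired;
}

int main(void)
{
	printf("%d\n", gpu_active(10, 8));	/* 1: two submits still in flight */
	printf("%d\n", gpu_active(10, 10));	/* 0: idle */
	return 0;
}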