Message ID | 1527762711-18831-1-git-send-email-smasetty@codeaurora.org (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Thu, May 31, 2018 at 04:01:51PM +0530, Sharat Masetty wrote: > This patch adds a simple helper function to help write 64 bit payloads > to the ringbuffer. > > Signed-off-by: Sharat Masetty <smasetty@codeaurora.org> > --- > drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 12 ++++-------- > drivers/gpu/drm/msm/adreno/a5xx_power.c | 3 +-- > drivers/gpu/drm/msm/msm_ringbuffer.h | 6 ++++++ > 3 files changed, 11 insertions(+), 10 deletions(-) > > diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c > index 5cdf104..7a58a01 100644 > --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c > +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c > @@ -223,8 +223,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, > > /* Set the save preemption record for the ring/command */ > OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2); > - OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id])); > - OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id])); > + OUT_RING64(ring, a5xx_gpu->preempt_iova[submit->ring->id]); > > /* Turn back on protected mode */ > OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); > @@ -248,8 +247,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, > break; > case MSM_SUBMIT_CMD_BUF: > OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); > - OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); > - OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); > + OUT_RING64(ring, submit->cmd[i].iova); > OUT_RING(ring, submit->cmd[i].size); > ibs++; > break; > @@ -282,8 +280,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, > */ > OUT_PKT7(ring, CP_EVENT_WRITE, 4); > OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31)); > - OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); > - OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); > + OUT_RING64(ring, rbmemptr(ring, fence)); > OUT_RING(ring, submit->seqno); > > /* Yield the floor on command completion */ >
@@ -469,8 +466,7 @@ static int a5xx_preempt_start(struct msm_gpu *gpu) > > /* Set the save preemption record for the ring/command */ > OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2); > - OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id])); > - OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id])); > + OUT_RING64(ring, a5xx_gpu->preempt_iova[ring->id]); > > /* Turn back on protected mode */ > OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); > diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c > index e9c0e56..ea5bfa5 100644 > --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c > +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c > @@ -190,8 +190,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu) > > /* Kick off the IB to load the GPMU microcode */ > OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); > - OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova)); > - OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova)); > + OUT_RING64(ring, a5xx_gpu->gpmu_iova); > OUT_RING(ring, a5xx_gpu->gpmu_dwords); > > /* Turn back on protected mode */ > diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h > index cffce09..ea8fc39 100644 > --- a/drivers/gpu/drm/msm/msm_ringbuffer.h > +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h > @@ -61,4 +61,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, > *(ring->next++) = data; > } > > +static inline void > +OUT_RING64(struct msm_ringbuffer *ring, uint64_t data) I prefer u64 instead of uint64_t for code. > +{ > + OUT_RING(ring, lower_32_bits(data)); > + OUT_RING(ring, upper_32_bits(data)); > +} > #endif /* __MSM_RINGBUFFER_H__ */ > -- > 1.9.1
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 5cdf104..7a58a01 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -223,8 +223,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, /* Set the save preemption record for the ring/command */ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2); - OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id])); - OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id])); + OUT_RING64(ring, a5xx_gpu->preempt_iova[submit->ring->id]); /* Turn back on protected mode */ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); @@ -248,8 +247,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, break; case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); - OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); - OUT_RING(ring, upper_32_bits(submit->cmd[i].iova)); + OUT_RING64(ring, submit->cmd[i].iova); OUT_RING(ring, submit->cmd[i].size); ibs++; break; @@ -282,8 +280,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, */ OUT_PKT7(ring, CP_EVENT_WRITE, 4); OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31)); - OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); - OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); + OUT_RING64(ring, rbmemptr(ring, fence)); OUT_RING(ring, submit->seqno); /* Yield the floor on command completion */ @@ -469,8 +466,7 @@ static int a5xx_preempt_start(struct msm_gpu *gpu) /* Set the save preemption record for the ring/command */ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2); - OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id])); - OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id])); + OUT_RING64(ring, a5xx_gpu->preempt_iova[ring->id]); /* Turn back on protected mode */ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index e9c0e56..ea5bfa5 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -190,8 +190,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu) /* Kick off the IB to load the GPMU microcode */ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); - OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova)); - OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova)); + OUT_RING64(ring, a5xx_gpu->gpmu_iova); OUT_RING(ring, a5xx_gpu->gpmu_dwords); /* Turn back on protected mode */ diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index cffce09..ea8fc39 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -61,4 +61,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, *(ring->next++) = data; } +static inline void +OUT_RING64(struct msm_ringbuffer *ring, uint64_t data) +{ + OUT_RING(ring, lower_32_bits(data)); + OUT_RING(ring, upper_32_bits(data)); +} #endif /* __MSM_RINGBUFFER_H__ */
This patch adds a simple helper function to help write 64 bit payloads to the ringbuffer. Signed-off-by: Sharat Masetty <smasetty@codeaurora.org> --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 12 ++++-------- drivers/gpu/drm/msm/adreno/a5xx_power.c | 3 +-- drivers/gpu/drm/msm/msm_ringbuffer.h | 6 ++++++ 3 files changed, 11 insertions(+), 10 deletions(-)