@@ -38,6 +38,8 @@
#define CMDQ_THR_PRIORITY 0x40
#define GCE_GCTL_VALUE 0x48
+#define GCE_CTRL_BY_SW GENMASK(2, 0)
+#define GCE_DDR_EN GENMASK(18, 16)
#define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200
#define CMDQ_THR_ENABLED 0x1
@@ -80,16 +82,60 @@ struct cmdq {
bool suspended;
u8 shift_pa;
bool control_by_sw;
+ bool sw_ddr_en;
u32 gce_num;
+ atomic_t usage;
+ spinlock_t lock;
};
struct gce_plat {
u32 thread_nr;
u8 shift;
bool control_by_sw;
+ bool sw_ddr_en;
u32 gce_num;
};
+static s32 cmdq_clk_enable(struct cmdq *cmdq)
+{
+ s32 usage, ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmdq->lock, flags);
+
+ usage = atomic_inc_return(&cmdq->usage);
+
+ ret = clk_bulk_enable(cmdq->gce_num, cmdq->clocks);
+ if (usage <= 0 || ret < 0) {
+ dev_err(cmdq->mbox.dev, "ref count %d ret %d suspend %d\n",
+ usage, ret, cmdq->suspended);
+ } else if (usage == 1) {
+ if (cmdq->sw_ddr_en)
+ writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
+ }
+
+ spin_unlock_irqrestore(&cmdq->lock, flags);
+
+ return ret;
+}
+
+static void cmdq_clk_disable(struct cmdq *cmdq)
+{
+ s32 usage;
+
+ usage = atomic_dec_return(&cmdq->usage);
+
+ if (usage < 0) {
+ dev_err(cmdq->mbox.dev, "ref count %d suspend %d\n",
+ usage, cmdq->suspended);
+ } else if (usage == 0) {
+ if (cmdq->sw_ddr_en)
+ writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
+ }
+
+ clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
+}
+
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
@@ -266,7 +312,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
if (list_empty(&thread->task_busy_list)) {
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
+
+ cmdq_clk_disable(cmdq);
}
}
@@ -355,8 +402,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
task->pkt = pkt;
if (list_empty(&thread->task_busy_list)) {
- WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));
-
+ WARN_ON(cmdq_clk_enable(cmdq) < 0);
/*
* The thread reset will clear thread related register to 0,
* including pc, end, priority, irq, suspend and enable. Thus
@@ -428,7 +474,7 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
}
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
+ cmdq_clk_disable(cmdq);
done:
/*
@@ -468,7 +514,8 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
cmdq_thread_resume(thread);
cmdq_thread_disable(cmdq, thread);
- clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
+
+ cmdq_clk_disable(cmdq);
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
@@ -543,6 +590,7 @@ static int cmdq_probe(struct platform_device *pdev)
cmdq->thread_nr = plat_data->thread_nr;
cmdq->shift_pa = plat_data->shift;
cmdq->control_by_sw = plat_data->control_by_sw;
+ cmdq->sw_ddr_en = plat_data->sw_ddr_en;
cmdq->gce_num = plat_data->gce_num;
cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
@@ -615,6 +663,7 @@ static int cmdq_probe(struct platform_device *pdev)
WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));
+ spin_lock_init(&cmdq->lock);
cmdq_init(cmdq);
return 0;
@@ -660,9 +709,18 @@ static const struct gce_plat gce_plat_v6 = {
.gce_num = 2
};
+static const struct gce_plat gce_plat_v7 = {
+ .thread_nr = 24,
+ .shift = 3,
+ .control_by_sw = true,
+ .sw_ddr_en = true,
+ .gce_num = 1
+};
+
static const struct of_device_id cmdq_of_ids[] = {
{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
+ {.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_v7},
{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
{.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
1. Enable the GCE DDR enable setting (GCE register offset 0x48, bits 16 to 18)
   while the GCE is working, and disable it again once the GCE has finished its
   work.
2. Split the cmdq clock enable/disable into dedicated helpers, and control the
   GCE DDR enable/disable inside those helpers, so the setting is protected
   when the GCE is used by multiple clients (display and MDP) at the same time.

This only applies to SoCs that have the "control_by_sw" flag. On this kind of
GCE there is a handshake flow between the GCE and the DDR hardware: if the DDR
enable flag of the GCE is not set, DDR falls into idle mode and GCE
instructions never finish processing. We need to set this flag so the GCE can
tell DDR, under software control, whether it is idle or busy.

The DDR problem is a special case. In suspend/resume tests the GCE sometimes
keeps pulling DDR, so DDR cannot enter suspend. Setting GCE register 0x48 to
0x7 fixes this GCE-pulls-DDR issue, as referenced in [1] and [2] (MT8192 and
MT8195). The MT8186 GCE is more special: in addition to the settings from [1]
and [2], register 0x48 must be set to (0x7 << 16 | 0x7) while the GCE is
working so that it can process all instructions correctly. This shows up on a
normal boot: without this setting, display cmdq tasks time out and the Chrome
home screen stays black.

With this patch we have run the following tests on MT8186:
1. suspend/resume
2. boot up to the home screen
3. video playback with YouTube

The suspend issue is a special GCE hardware issue: the GCE client driver's
commands have already finished processing, but the GCE still pulls DDR.

Signed-off-by: Yongqiang Niu <yongqiang.niu@mediatek.com>
---
Changes since v2:
1. Add the definitions GCE_CTRL_BY_SW and GCE_DDR_EN instead of magic numbers.
---
 drivers/mailbox/mtk-cmdq-mailbox.c | 68 +++++++++++++++++++++++++++---
 1 file changed, 63 insertions(+), 5 deletions(-)
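
Note (illustration only, not part of the patch): the sequence below is a
sketch of how the refcounted helpers from the diff are expected to pair up
when two GCE users, e.g. display and MDP, have overlapping busy periods. The
client names and the interleaving are assumed for the example; the point is
that only the first enable asserts GCE_DDR_EN and only the last disable drops
the register back to GCE_CTRL_BY_SW, so DDR stays awake for as long as any
thread still has busy tasks.

	/*
	 * Sketch: two mailbox channels sharing one GCE. The atomic usage
	 * counter raises the DDR enable flag on the first user and clears
	 * it on the last one, never in between.
	 */
	cmdq_clk_enable(cmdq);   /* display: usage 0 -> 1, writes GCE_DDR_EN | GCE_CTRL_BY_SW */
	cmdq_clk_enable(cmdq);   /* mdp:     usage 1 -> 2, no GCTL write */
	cmdq_clk_disable(cmdq);  /* display: usage 2 -> 1, no GCTL write */
	cmdq_clk_disable(cmdq);  /* mdp:     usage 1 -> 0, writes GCE_CTRL_BY_SW only */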