[v5,04/13] ASoC: Intel: catpt: Implement IPC protocol

Message ID 20200915162944.16241-5-cezary.rojewski@intel.com (mailing list archive)
State Superseded
Series ASoC: Intel: Catpt - Lynx and Wildcat point

Commit Message

Cezary Rojewski Sept. 15, 2020, 4:29 p.m. UTC
Implement IRQ handlers for immediate and delayed replies and
notifications. Communication is synchronous and serialized, with at
most one message in flight at a time.

The DSP may respond to a request with ADSP_PENDING status - known as a
delayed reply - and when that happens, the framework keeps the lock held
and awaits the upcoming response on the IPCD channel, which is handled
in the bottom half. Immediate replies spawn no bottom half at all as
their processing is very short.
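
For reference, a caller-side sketch of the synchronous path (illustrative
only - the header value and payload below are placeholders rather than a
real firmware message; only the catpt_ipc_msg fields and
catpt_dsp_send_msg() added by this patch are assumed):

static int example_ipc_call(struct catpt_dev *cdev, void *payload, u32 size)
{
	struct catpt_ipc_msg request = {0}, reply = {0};
	int ret;

	/* placeholder header; real values follow the firmware IPC layout */
	request.header = 0;
	request.data = payload;
	request.size = size;
	/* no reply payload expected; only the status in the header matters */
	reply.size = 0;

	/*
	 * Blocks under ipc->mutex until the immediate reply (IPCC DONE)
	 * arrives or, for a pending status, until the delayed reply shows
	 * up on IPCD, subject to the default 300 ms timeout.
	 */
	ret = catpt_dsp_send_msg(cdev, request, &reply);
	if (ret)
		dev_err(cdev->dev, "example request failed: %d\n", ret);

	return ret;
}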

Signed-off-by: Cezary Rojewski <cezary.rojewski@intel.com>
---
 sound/soc/intel/catpt/dsp.c |  99 ++++++++++++
 sound/soc/intel/catpt/ipc.c | 298 ++++++++++++++++++++++++++++++++++++
 2 files changed, 397 insertions(+)
 create mode 100644 sound/soc/intel/catpt/ipc.c

Patch

diff --git a/sound/soc/intel/catpt/dsp.c b/sound/soc/intel/catpt/dsp.c
index 202d90bb51b4..e50d34a1056e 100644
--- a/sound/soc/intel/catpt/dsp.c
+++ b/sound/soc/intel/catpt/dsp.c
@@ -5,6 +5,7 @@ 
 // Author: Cezary Rojewski <cezary.rojewski@intel.com>
 //
 
+#include <linux/devcoredump.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
 #include <linux/pci.h>
@@ -471,3 +472,101 @@  int wpt_dsp_power_up(struct catpt_dev *cdev)
 
 	return 0;
 }
+
+#define CATPT_DUMP_MAGIC		0xcd42
+#define CATPT_DUMP_SECTION_ID_FILE	0x00
+#define CATPT_DUMP_SECTION_ID_IRAM	0x01
+#define CATPT_DUMP_SECTION_ID_DRAM	0x02
+#define CATPT_DUMP_SECTION_ID_REGS	0x03
+
+struct catpt_dump_section_hdr {
+	u16 magic;
+	u8 core_id;
+	u8 section_id;
+	u32 size;
+};
+
+int catpt_coredump(struct catpt_dev *cdev)
+{
+	struct catpt_dump_section_hdr *hdr;
+	size_t dump_size, regs_size;
+	u8 *dump, *pos;
+	int i, j;
+
+	regs_size = CATPT_SHIM_REGS_SIZE;
+	regs_size += CATPT_DMA_COUNT * CATPT_DMA_REGS_SIZE;
+	regs_size += CATPT_SSP_COUNT * CATPT_SSP_REGS_SIZE;
+	dump_size = resource_size(&cdev->dram);
+	dump_size += resource_size(&cdev->iram);
+	dump_size += regs_size;
+	dump_size += 4 * sizeof(*hdr) + 20; /* hdrs and fw hash */
+
+	dump = vzalloc(dump_size);
+	if (!dump)
+		return -ENOMEM;
+
+	pos = dump;
+
+	hdr = (struct catpt_dump_section_hdr *)pos;
+	hdr->magic = CATPT_DUMP_MAGIC;
+	hdr->core_id = cdev->spec->core_id;
+	hdr->section_id = CATPT_DUMP_SECTION_ID_FILE;
+	hdr->size = dump_size - sizeof(*hdr);
+	pos += sizeof(*hdr);
+
+	for (i = j = 0; i < FW_INFO_SIZE_MAX; i++)
+		if (cdev->ipc.config.fw_info[i] == ' ')
+			if (++j == 4)
+				break;
+	for (j = ++i; j < FW_INFO_SIZE_MAX && j - i < 20; j++) {
+		if (cdev->ipc.config.fw_info[j] == ' ')
+			break;
+		*(pos + j - i) = cdev->ipc.config.fw_info[j];
+	}
+	pos += 20;
+
+	hdr = (struct catpt_dump_section_hdr *)pos;
+	hdr->magic = CATPT_DUMP_MAGIC;
+	hdr->core_id = cdev->spec->core_id;
+	hdr->section_id = CATPT_DUMP_SECTION_ID_IRAM;
+	hdr->size = resource_size(&cdev->iram);
+	pos += sizeof(*hdr);
+
+	memcpy_fromio(pos, cdev->lpe_ba + cdev->iram.start, hdr->size);
+	pos += hdr->size;
+
+	hdr = (struct catpt_dump_section_hdr *)pos;
+	hdr->magic = CATPT_DUMP_MAGIC;
+	hdr->core_id = cdev->spec->core_id;
+	hdr->section_id = CATPT_DUMP_SECTION_ID_DRAM;
+	hdr->size = resource_size(&cdev->dram);
+	pos += sizeof(*hdr);
+
+	memcpy_fromio(pos, cdev->lpe_ba + cdev->dram.start, hdr->size);
+	pos += hdr->size;
+
+	hdr = (struct catpt_dump_section_hdr *)pos;
+	hdr->magic = CATPT_DUMP_MAGIC;
+	hdr->core_id = cdev->spec->core_id;
+	hdr->section_id = CATPT_DUMP_SECTION_ID_REGS;
+	hdr->size = regs_size;
+	pos += sizeof(*hdr);
+
+	memcpy_fromio(pos, catpt_shim_addr(cdev), CATPT_SHIM_REGS_SIZE);
+	pos += CATPT_SHIM_REGS_SIZE;
+
+	for (i = 0; i < CATPT_SSP_COUNT; i++) {
+		memcpy_fromio(pos, catpt_ssp_addr(cdev, i),
+			      CATPT_SSP_REGS_SIZE);
+		pos += CATPT_SSP_REGS_SIZE;
+	}
+	for (i = 0; i < CATPT_DMA_COUNT; i++) {
+		memcpy_fromio(pos, catpt_dma_addr(cdev, i),
+			      CATPT_DMA_REGS_SIZE);
+		pos += CATPT_DMA_REGS_SIZE;
+	}
+
+	dev_coredumpv(cdev->dev, dump, dump_size, GFP_KERNEL);
+
+	return 0;
+}
diff --git a/sound/soc/intel/catpt/ipc.c b/sound/soc/intel/catpt/ipc.c
new file mode 100644
index 000000000000..473f43f6b792
--- /dev/null
+++ b/sound/soc/intel/catpt/ipc.c
@@ -0,0 +1,298 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright(c) 2020 Intel Corporation. All rights reserved.
+//
+// Author: Cezary Rojewski <cezary.rojewski@intel.com>
+//
+
+#include <linux/irqreturn.h>
+#include "core.h"
+#include "messages.h"
+#include "registers.h"
+#include "trace.h"
+
+#define CATPT_IPC_TIMEOUT_MSECS	300
+
+void catpt_ipc_init(struct catpt_ipc *ipc, struct device *dev)
+{
+	ipc->dev = dev;
+	ipc->ready = false;
+	ipc->default_timeout = CATPT_IPC_TIMEOUT_MSECS;
+	init_completion(&ipc->done_completion);
+	init_completion(&ipc->busy_completion);
+	spin_lock_init(&ipc->lock);
+	mutex_init(&ipc->mutex);
+}
+
+static int catpt_ipc_arm(struct catpt_ipc *ipc, struct catpt_fw_ready *config)
+{
+	/*
+	 * Both tx and rx are put into and received from outbox. Inbox is
+	 * only used for notifications where payload size is known upfront,
+	 * thus no separate buffer is allocated for it.
+	 */
+	ipc->rx.data = devm_kzalloc(ipc->dev, config->outbox_size, GFP_KERNEL);
+	if (!ipc->rx.data)
+		return -ENOMEM;
+
+	memcpy(&ipc->config, config, sizeof(*config));
+	ipc->ready = true;
+
+	return 0;
+}
+
+static void catpt_ipc_msg_init(struct catpt_ipc *ipc,
+			       struct catpt_ipc_msg *reply)
+{
+	lockdep_assert_held(&ipc->lock);
+
+	ipc->rx.header = 0;
+	ipc->rx.size = reply ? reply->size : 0;
+	reinit_completion(&ipc->done_completion);
+	reinit_completion(&ipc->busy_completion);
+}
+
+static void catpt_dsp_send_tx(struct catpt_dev *cdev,
+			      const struct catpt_ipc_msg *tx)
+{
+	u32 header = tx->header | CATPT_IPCC_BUSY;
+
+	trace_catpt_ipc_request(header);
+	trace_catpt_ipc_payload(tx->data, tx->size);
+
+	if (tx->size)
+		memcpy_toio(catpt_outbox_addr(cdev), tx->data, tx->size);
+	catpt_writel_shim(cdev, IPCC, header);
+}
+
+static int catpt_wait_msg_completion(struct catpt_dev *cdev, int timeout)
+{
+	struct catpt_ipc *ipc = &cdev->ipc;
+	int ret;
+
+	ret = wait_for_completion_timeout(&ipc->done_completion,
+					  msecs_to_jiffies(timeout));
+	if (!ret)
+		return -ETIMEDOUT;
+	if (ipc->rx.rsp.status != CATPT_REPLY_PENDING)
+		return 0;
+
+	/* wait for delayed reply */
+	ret = wait_for_completion_timeout(&ipc->busy_completion,
+					  msecs_to_jiffies(timeout));
+	return ret ? 0 : -ETIMEDOUT;
+}
+
+static int catpt_dsp_do_send_msg(struct catpt_dev *cdev,
+				 struct catpt_ipc_msg request,
+				 struct catpt_ipc_msg *reply, int timeout)
+{
+	struct catpt_ipc *ipc = &cdev->ipc;
+	unsigned long flags;
+	int ret;
+
+	if (!ipc->ready)
+		return -EPERM;
+	if (request.size > ipc->config.outbox_size ||
+	    (reply && reply->size > ipc->config.outbox_size))
+		return -EINVAL;
+
+	spin_lock_irqsave(&ipc->lock, flags);
+	catpt_ipc_msg_init(ipc, reply);
+	catpt_dsp_send_tx(cdev, &request);
+	spin_unlock_irqrestore(&ipc->lock, flags);
+
+	ret = catpt_wait_msg_completion(cdev, timeout);
+	if (ret) {
+		dev_crit(cdev->dev, "communication severed: %d, rebooting dsp..\n",
+			 ret);
+		ipc->ready = false;
+		/* TODO: attempt recovery */
+		return ret;
+	}
+
+	ret = ipc->rx.rsp.status;
+	if (reply) {
+		reply->header = ipc->rx.header;
+		if (!ret && reply->data && reply->size)
+			memcpy(reply->data, ipc->rx.data, ipc->rx.size);
+	}
+
+	return ret;
+}
+
+int catpt_dsp_send_msg_timeout(struct catpt_dev *cdev,
+			       struct catpt_ipc_msg request,
+			       struct catpt_ipc_msg *reply, int timeout)
+{
+	struct catpt_ipc *ipc = &cdev->ipc;
+	int ret;
+
+	mutex_lock(&ipc->mutex);
+	ret = catpt_dsp_do_send_msg(cdev, request, reply, timeout);
+	mutex_unlock(&ipc->mutex);
+
+	return ret;
+}
+
+int catpt_dsp_send_msg(struct catpt_dev *cdev, struct catpt_ipc_msg request,
+		       struct catpt_ipc_msg *reply)
+{
+	return catpt_dsp_send_msg_timeout(cdev, request, reply,
+					  cdev->ipc.default_timeout);
+}
+
+static void
+catpt_dsp_notify_stream(struct catpt_dev *cdev, union catpt_notify_msg msg)
+{
+	struct catpt_stream_runtime *stream;
+	struct catpt_notify_position pos;
+	struct catpt_notify_glitch glitch;
+
+	stream = catpt_stream_find(cdev, msg.stream_hw_id);
+	if (!stream) {
+		dev_warn(cdev->dev, "notify %d for non-existent stream %d\n",
+			 msg.notify_reason, msg.stream_hw_id);
+		return;
+	}
+
+	switch (msg.notify_reason) {
+	case CATPT_NOTIFY_POSITION_CHANGED:
+		memcpy_fromio(&pos, catpt_inbox_addr(cdev), sizeof(pos));
+		trace_catpt_ipc_payload((u8 *)&pos, sizeof(pos));
+
+		catpt_stream_update_position(cdev, stream, &pos);
+		break;
+
+	case CATPT_NOTIFY_GLITCH_OCCURRED:
+		memcpy_fromio(&glitch, catpt_inbox_addr(cdev), sizeof(glitch));
+		trace_catpt_ipc_payload((u8 *)&glitch, sizeof(glitch));
+
+		dev_warn(cdev->dev, "glitch %d at pos: 0x%08llx, wp: 0x%08x\n",
+			 glitch.type, glitch.presentation_pos,
+			 glitch.write_pos);
+		break;
+
+	default:
+		dev_warn(cdev->dev, "unknown notification: %d received\n",
+			 msg.notify_reason);
+		break;
+	}
+}
+
+static void catpt_dsp_copy_rx(struct catpt_dev *cdev, u32 header)
+{
+	struct catpt_ipc *ipc = &cdev->ipc;
+
+	ipc->rx.header = header;
+	if (ipc->rx.size && ipc->rx.rsp.status == CATPT_REPLY_SUCCESS) {
+		memcpy_fromio(ipc->rx.data, catpt_outbox_addr(cdev),
+			      ipc->rx.size);
+		trace_catpt_ipc_payload(ipc->rx.data, ipc->rx.size);
+	}
+}
+
+static void catpt_dsp_process_response(struct catpt_dev *cdev, u32 header)
+{
+	union catpt_notify_msg msg = CATPT_MSG(header);
+	struct catpt_ipc *ipc = &cdev->ipc;
+
+	if (msg.fw_ready) {
+		struct catpt_fw_ready config;
+		/* to fit 32b header original address is shifted right by 3 */
+		u32 off = msg.mailbox_address << 3;
+
+		memcpy_fromio(&config, cdev->lpe_ba + off, sizeof(config));
+		trace_catpt_ipc_payload((u8 *)&config, sizeof(config));
+
+		catpt_ipc_arm(ipc, &config);
+		complete(&cdev->fw_ready);
+		return;
+	}
+
+	switch (msg.type) {
+	case CATPT_GLB_REQUEST_CORE_DUMP:
+		dev_err(cdev->dev, "ADSP device coredump received\n");
+		ipc->ready = false;
+		catpt_coredump(cdev);
+		/* TODO: attempt recovery */
+		break;
+
+	case CATPT_GLB_STREAM_MESSAGE:
+		switch (msg.subtype) {
+		case CATPT_STRM_NOTIFICATION:
+			catpt_dsp_notify_stream(cdev, msg);
+			break;
+		default:
+			catpt_dsp_copy_rx(cdev, header);
+			/* signal completion of delayed reply */
+			complete(&ipc->busy_completion);
+			break;
+		}
+		break;
+
+	default:
+		dev_warn(cdev->dev, "unknown response: %d received\n",
+			 msg.type);
+		break;
+	}
+}
+
+irqreturn_t catpt_dsp_irq_thread(int irq, void *dev_id)
+{
+	struct catpt_dev *cdev = dev_id;
+	u32 ipcd;
+
+	ipcd = catpt_readl_shim(cdev, IPCD);
+	trace_catpt_ipc_notify(ipcd);
+
+	/* ensure there is delayed reply or notification to process */
+	if (!(ipcd & CATPT_IPCD_BUSY))
+		return IRQ_NONE;
+
+	catpt_dsp_process_response(cdev, ipcd);
+
+	/* tell DSP processing is completed */
+	catpt_updatel_shim(cdev, IPCD, CATPT_IPCD_BUSY | CATPT_IPCD_DONE,
+			   CATPT_IPCD_DONE);
+	/* unmask dsp BUSY interrupt */
+	catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCDB, 0);
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t catpt_dsp_irq_handler(int irq, void *dev_id)
+{
+	struct catpt_dev *cdev = dev_id;
+	irqreturn_t ret = IRQ_NONE;
+	u32 isc, ipcc;
+
+	isc = catpt_readl_shim(cdev, ISC);
+	trace_catpt_irq(isc);
+
+	/* immediate reply */
+	if (isc & CATPT_ISC_IPCCD) {
+		/* mask host DONE interrupt */
+		catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCCD, CATPT_IMC_IPCCD);
+
+		ipcc = catpt_readl_shim(cdev, IPCC);
+		trace_catpt_ipc_reply(ipcc);
+		catpt_dsp_copy_rx(cdev, ipcc);
+		complete(&cdev->ipc.done_completion);
+
+		/* tell DSP processing is completed */
+		catpt_updatel_shim(cdev, IPCC, CATPT_IPCC_DONE, 0);
+		/* unmask host DONE interrupt */
+		catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCCD, 0);
+		ret = IRQ_HANDLED;
+	}
+
+	/* delayed reply or notification */
+	if (isc & CATPT_ISC_IPCDB) {
+		/* mask dsp BUSY interrupt */
+		catpt_updatel_shim(cdev, IMC, CATPT_IMC_IPCDB, CATPT_IMC_IPCDB);
+		ret = IRQ_WAKE_THREAD;
+	}
+
+	return ret;
+}