diff mbox series

[V2,2/2] mailbox: tmelite-qmp: Introduce TMEL QMP mailbox driver

Message ID 20241231054900.2144961-3-quic_srichara@quicinc.com (mailing list archive)
State Not Applicable, archived
Headers show
Series mailbox: tmel-qmp: Introduce QCOM TMEL QMP mailbox driver | expand

Commit Message

Sricharan Ramabadhran Dec. 31, 2024, 5:49 a.m. UTC
From: Sricharan Ramabadhran <quic_srichara@quicinc.com>

This mailbox facilitates the communication between the TME-L server based
subsystems (Q6) and the TME-L client (APPSS/BTSS/AUDIOSS), used for security
services like secure image authentication, enable/disable efuses, crypto
services. Each client in the SoC has its own block of message RAM and IRQ
for communication with the TME-L SS. The protocol used to communicate in the
message RAM is known as Qualcomm Messaging Protocol (QMP).

Remote proc driver subscribes to this mailbox and uses the mbox_send_message
to use TME-L to securely authenticate/teardown the images.

Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
---
  [v2] Added worker for mailbox tx processing, since some of the operations can sleep
       Fixed checkpatch warnings. Some [CHECK] like below still exist, but that looks
     like a false positive.

       CHECK: Macro argument 'm' may be better as '(m)' to avoid precedence issues
        #1072: FILE: include/linux/mailbox/tmelcom-qmp.h:40:
        +#define TMEL_MSG_UID_CREATE(m, a)      ((u32)(((m & 0xff) << 8) | (a & 0xff)))

 drivers/mailbox/Kconfig             |   7 +
 drivers/mailbox/Makefile            |   2 +
 drivers/mailbox/qcom-tmel-qmp.c     | 971 ++++++++++++++++++++++++++++
 include/linux/mailbox/tmelcom-qmp.h | 157 +++++
 4 files changed, 1137 insertions(+)
 create mode 100644 drivers/mailbox/qcom-tmel-qmp.c
 create mode 100644 include/linux/mailbox/tmelcom-qmp.h

Comments

Varadarajan Narayanan Dec. 31, 2024, 6:21 a.m. UTC | #1
On Tue, Dec 31, 2024 at 11:19:00AM +0530, Sricharan R wrote:
> From: Sricharan Ramabadhran <quic_srichara@quicinc.com>
>
> This mailbox facilitates the communication between the TME-L server based
> subsystems (Q6) and the TME-L client (APPSS/BTSS/AUDIOSS), used for security
> services like secure image authentication, enable/disable efuses, crypto
> services. Each client in the   SoC has its own block of message RAM and IRQ

Extra space before 'SoC'.

> for communication with the TME-L SS. The protocol used to communicate in the
> message RAM is known as Qualcomm Messaging Protocol (QMP).
>
> Remote proc driver subscribes to this mailbox and uses the mbox_send_message
> to use TME-L to securely authenticate/teardown the images.
>
> Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
> ---
>   [v2] Added worker for mailbox tx processing, since some of the operations can sleep
>        Fixed checkpatch warnings. Some [CHECK] like below still exist, but that looks
>        like a false postive.
>
>        CHECK: Macro argument 'm' may be better as '(m)' to avoid precedence issues
>         #1072: FILE: include/linux/mailbox/tmelcom-qmp.h:40:
>         +#define TMEL_MSG_UID_CREATE(m, a)      ((u32)(((m & 0xff) << 8) | (a & 0xff)))
>
>  drivers/mailbox/Kconfig             |   7 +
>  drivers/mailbox/Makefile            |   2 +
>  drivers/mailbox/qcom-tmel-qmp.c     | 971 ++++++++++++++++++++++++++++
>  include/linux/mailbox/tmelcom-qmp.h | 157 +++++
>  4 files changed, 1137 insertions(+)
>  create mode 100644 drivers/mailbox/qcom-tmel-qmp.c
>  create mode 100644 include/linux/mailbox/tmelcom-qmp.h
>
> diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
> index 8ecba7fb999e..8ad0b834d617 100644
> --- a/drivers/mailbox/Kconfig
> +++ b/drivers/mailbox/Kconfig
> @@ -306,4 +306,11 @@ config THEAD_TH1520_MBOX
>  	  kernel is running, and E902 core used for power management among other
>  	  things.
>
> +config QCOM_TMEL_QMP_MAILBOX
> +	tristate "QCOM Mailbox Protocol(QMP) for TME-L SS"

Please add the usual checks to QCOM_TMEL_QMP_MAILBOX to avoid randomconfig bot errors.

> +	help
> +	  Say yes to add support for the QMP Mailbox Protocol driver for TME-L.
> +	  QMP is a lightweight communication protocol for sending messages to
> +	  TME-L. This protocol fits into the Generic Mailbox Framework.
> +	  QMP uses a mailbox registers.
>  endif
> diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
> index 5f4f5b0ce2cc..4dba283a94ad 100644
> --- a/drivers/mailbox/Makefile
> +++ b/drivers/mailbox/Makefile
> @@ -66,3 +66,5 @@ obj-$(CONFIG_QCOM_CPUCP_MBOX)	+= qcom-cpucp-mbox.o
>  obj-$(CONFIG_QCOM_IPCC)		+= qcom-ipcc.o
>
>  obj-$(CONFIG_THEAD_TH1520_MBOX)	+= mailbox-th1520.o
> +
> +obj-$(CONFIG_QCOM_TMEL_QMP_MAILBOX) += qcom-tmel-qmp.o
> diff --git a/drivers/mailbox/qcom-tmel-qmp.c b/drivers/mailbox/qcom-tmel-qmp.c
> new file mode 100644
> index 000000000000..6de0a418e0ae
> --- /dev/null
> +++ b/drivers/mailbox/qcom-tmel-qmp.c
> @@ -0,0 +1,971 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
> + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
> + */

Copyright year should be updated if you post next version.

> +
> +#include <linux/completion.h>
> +#include <linux/delay.h>
> +#include <linux/dma-direction.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/kernel.h>
> +#include <linux/kthread.h>
> +#include <linux/mailbox_client.h>
> +#include <linux/mailbox_controller.h>
> +#include <linux/mailbox/tmelcom-qmp.h>
> +#include <linux/module.h>
> +#include <linux/of.h>
> +#include <linux/of_irq.h>
> +#include <linux/of_platform.h>
> +#include <linux/platform_device.h>
> +#include <linux/spinlock.h>
> +#include <linux/types.h>
> +#include <linux/uaccess.h>
> +#include <linux/uio.h>
> +#include <linux/workqueue.h>
> +
> +#define QMP_NUM_CHANS	0x1
> +#define QMP_TOUT_MS	1000
> +#define MBOX_ALIGN_BYTES	3
> +#define QMP_CTRL_DATA_SIZE	4
> +#define QMP_MAX_PKT_SIZE	0x18
> +#define QMP_UCORE_DESC_OFFSET	0x1000
> +
> +#define QMP_CH_VAR_GET(mdev, desc, var) ((mdev)->desc.bits.var)
> +#define QMP_CH_VAR_SET(mdev, desc, var) (mdev)->desc.bits.var = 1
> +#define QMP_CH_VAR_CLR(mdev, desc, var) (mdev)->desc.bits.var = 0
> +
> +#define QMP_MCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, mcore, var)
> +#define QMP_MCORE_CH_VAR_SET(mdev, var)	QMP_CH_VAR_SET(mdev, mcore, var)
> +#define QMP_MCORE_CH_VAR_CLR(mdev, var)	QMP_CH_VAR_CLR(mdev, mcore, var)
> +
> +#define QMP_MCORE_CH_VAR_TOGGLE(mdev, var) \
> +	(mdev)->mcore.bits.var = !((mdev)->mcore.bits.var)
> +#define QMP_MCORE_CH_ACKED_CHECK(mdev, var) \
> +	((mdev)->ucore.bits.var == (mdev)->mcore.bits.var##_ack)
> +#define QMP_MCORE_CH_ACK_UPDATE(mdev, var) \
> +	(mdev)->mcore.bits.var##_ack = (mdev)->ucore.bits.var
> +#define QMP_MCORE_CH_VAR_ACK_CLR(mdev, var) \
> +	(mdev)->mcore.bits.var##_ack = 0
> +
> +#define QMP_UCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, ucore, var)
> +#define QMP_UCORE_CH_ACKED_CHECK(mdev, var) \
> +	((mdev)->mcore.bits.var == (mdev)->ucore.bits.var##_ack)
> +#define QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, var) \
> +	((mdev)->ucore.bits.var != (mdev)->mcore.bits.var##_ack)
> +
> +/**
> + * enum qmp_local_state -	definition of the local state machine
> + * @LINK_DISCONNECTED:		Init state, waiting for ucore to start
> + * @LINK_NEGOTIATION:		Set local link state to up, wait for ucore ack
> + * @LINK_CONNECTED:		Link state up, channel not connected
> + * @LOCAL_CONNECTING:		Channel opening locally, wait for ucore ack
> + * @CHANNEL_CONNECTED:		Channel fully opened
> + * @LOCAL_DISCONNECTING:	Channel closing locally, wait for ucore ack
> + */
> +enum qmp_local_state {
> +	LINK_DISCONNECTED,
> +	LINK_NEGOTIATION,
> +	LINK_CONNECTED,
> +	LOCAL_CONNECTING,
> +	CHANNEL_CONNECTED,
> +	LOCAL_DISCONNECTING,
> +};
> +
> +union channel_desc {
> +	struct {
> +		u32 link_state:1;
> +		u32 link_state_ack:1;
> +		u32 ch_state:1;
> +		u32 ch_state_ack:1;
> +		u32 tx:1;
> +		u32 tx_ack:1;
> +		u32 rx_done:1;
> +		u32 rx_done_ack:1;
> +		u32 read_int:1;
> +		u32 read_int_ack:1;
> +		u32 reserved:6;
> +		u32 frag_size:8;
> +		u32 rem_frag_count:8;
> +	} bits;
> +	unsigned int val;
> +};
> +
> +struct qmp_work {
> +	struct work_struct work;
> +	void *data;
> +};
> +
> +/**
> + * struct qmp_device - local information for managing a single mailbox
> + * @dev:	    The device that corresponds to this mailbox
> + * @ctrl:	    The mbox controller for this mailbox
> + * @mcore_desc:	    Local core (APSS) mailbox descriptor
> + * @ucore_desc:	    Remote core (TME-L) mailbox descriptor
> + * @mcore:	    Local core (APSS) channel descriptor
> + * @ucore:	    Remote core (TME-L) channel descriptor
> + * @rx_pkt:	    Buffer to pass to client, holds received data from mailbox
> + * @tx_pkt:	    Buffer from client, holds data to send on mailbox
> + * @mbox_client:    Mailbox client for the IPC interrupt
> + * @mbox_chan:	    Mailbox client chan for the IPC interrupt
> + * @local_state:    Current state of mailbox protocol
> + * @state_lock:	    Serialize mailbox state changes
> + * @tx_lock:	    Serialize access for writes to mailbox
> + * @link_complete:  Use to block until link negotiation with remote proc
> + * @ch_complete:    Use to block until the channel is fully opened
> + * @dwork:	    Delayed work to detect timed out tx
> + * @tx_sent:	    True if tx is sent and remote proc has not sent ack
> + */
> +struct qmp_device {
> +	struct device *dev;
> +	struct mbox_controller ctrl;
> +	struct qmp_work qwork;
> +
> +	void __iomem *mcore_desc;
> +	void __iomem *ucore_desc;
> +	union channel_desc mcore;
> +	union channel_desc ucore;
> +
> +	struct kvec rx_pkt;
> +	struct kvec tx_pkt;
> +
> +	struct mbox_client mbox_client;
> +	struct mbox_chan *mbox_chan;
> +
> +	enum qmp_local_state local_state;
> +
> +	/* Lock for QMP link state changes */
> +	struct mutex state_lock;
> +	/* Lock to serialize access to mailbox */
> +	spinlock_t tx_lock;
> +
> +	struct completion link_complete;
> +	struct completion ch_complete;
> +	struct delayed_work dwork;
> +	void *data;
> +
> +	bool tx_sent;
> +	bool ch_in_use;
> +};
> +
> +struct tmel_msg_param_type_buf_in {
> +	u32 buf;
> +	u32 buf_len;
> +};
> +
> +struct tmel_secboot_sec_auth_req {
> +	u32 sw_id;
> +	struct tmel_msg_param_type_buf_in elf_buf;
> +	struct tmel_msg_param_type_buf_in region_list;
> +	u32 relocate;
> +} __packed;
> +
> +struct tmel_secboot_sec_auth_resp {
> +	u32 first_seg_addr;
> +	u32 first_seg_len;
> +	u32 entry_addr;
> +	u32 extended_error;
> +	u32 status;
> +} __packed;
> +
> +struct tmel_secboot_sec_auth {
> +	struct tmel_secboot_sec_auth_req req;
> +	struct tmel_secboot_sec_auth_resp resp;
> +} __packed;
> +
> +struct tmel_secboot_teardown_req {
> +	u32 sw_id;
> +	u32 secondary_sw_id;
> +} __packed;
> +
> +struct tmel_secboot_teardown_resp {
> +	u32 status;
> +} __packed;
> +
> +struct tmel_secboot_teardown {
> +	struct tmel_secboot_teardown_req req;
> +	struct tmel_secboot_teardown_resp resp;
> +} __packed;
> +
> +struct tmel {
> +	struct device *dev;
> +	struct qmp_device *mdev;
> +	struct kvec pkt;
> +	/* To serialize incoming tmel request */
> +	struct mutex lock;
> +	struct tmel_ipc_pkt *ipc_pkt;
> +	dma_addr_t sram_dma_addr;
> +	wait_queue_head_t waitq;
> +	bool rx_done;
> +};

Typically it is expected that structs/unions/enums/functions have
a similar prefix for the entire driver. Here multiple prefixes
like qmp_local_state, channel_desc, qmp_work, tmel_xxx are used
please see if it is possible to change.

> +static struct tmel *tmeldev;

Can this be avoided?

> +/**
> + * qmp_send_irq() - send an irq to a remote entity as an event signal.
> + * @mdev:       Which remote entity that should receive the irq.
> + */
> +static void qmp_send_irq(struct qmp_device *mdev)
> +{
> +	/* Update the mcore val to mcore register */

Remove ^^^^

> +	iowrite32(mdev->mcore.val, mdev->mcore_desc);
> +	/* Ensure desc update is visible before IPC */
> +	wmb();
> +
> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
> +		mdev->mcore.val, mdev->ucore.val);
> +
> +	mbox_send_message(mdev->mbox_chan, NULL);
> +	mbox_client_txdone(mdev->mbox_chan, 0);
> +}
> +
> +/**
> + * qmp_notify_timeout() - Notify client of tx timeout with -ETIME
> + * @work:		  Structure for work that was scheduled.
> + */
> +static void qmp_notify_timeout(struct work_struct *work)
> +{
> +	struct delayed_work *dwork = to_delayed_work(work);
> +	struct qmp_device *mdev = container_of(dwork, struct qmp_device, dwork);
> +	struct mbox_chan *chan = &mdev->ctrl.chans[0];
> +	int err = -ETIME;

'int err' can be removed and -ETIME can be used directly in
mbox_chan_txdone

> +	unsigned long flags;

This and other functions reverse xmas tree for variable
declaration.

> +
> +	spin_lock_irqsave(&mdev->tx_lock, flags);
> +	if (!mdev->tx_sent) {
> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
> +		return;
> +	}
> +	mdev->tx_sent = false;
> +	spin_unlock_irqrestore(&mdev->tx_lock, flags);
> +	dev_dbg(mdev->dev, "%s: TX timeout", __func__);
> +	mbox_chan_txdone(chan, err);
> +}
> +
> +static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev)
> +{
> +	schedule_delayed_work(&mdev->dwork, msecs_to_jiffies(QMP_TOUT_MS));
> +}
> +
> +/**
> + * tmel_qmp_startup() - Start qmp mailbox channel for communication. Waits for
> + *		       remote subsystem to open channel if link is not
> + *		       initated or until timeout.
> + * @chan:	       mailbox channel that is being opened.
> + *
> + * Return: 0 on succes or standard Linux error code.

success

> + */
> +static int tmel_qmp_startup(struct mbox_chan *chan)
> +{
> +	struct qmp_device *mdev = chan->con_priv;
> +	int ret;
> +
> +	if (!mdev)
> +		return -EINVAL;
> +
> +	ret = wait_for_completion_timeout(&mdev->link_complete,
> +					  msecs_to_jiffies(QMP_TOUT_MS));
> +	if (!ret)
> +		return -EAGAIN;
> +
> +	mutex_lock(&mdev->state_lock);
> +	if (mdev->local_state == LINK_CONNECTED) {
> +		QMP_MCORE_CH_VAR_SET(mdev, ch_state);
> +		mdev->local_state = LOCAL_CONNECTING;
> +		dev_dbg(mdev->dev, "link complete, local connecting");
> +		qmp_send_irq(mdev);
> +	}
> +	mutex_unlock(&mdev->state_lock);
> +
> +	ret = wait_for_completion_timeout(&mdev->ch_complete,
> +					  msecs_to_jiffies(QMP_TOUT_MS));
> +	if (!ret)
> +		return -ETIME;
> +
> +	return 0;
> +}
> +
> +/**
> + * qmp_send_data() - Copy the data to the channel's mailbox and notify
> + *		     remote subsystem of new data. This function will
> + *		     return an error if the previous message sent has
> + *		     not been read. Cannot Sleep.
> + * @chan:	mailbox channel that data is to be sent over.
> + * @data:	Data to be sent to remote processor, should be in the format of
> + *		a kvec.
> + *
> + * Return: 0 on succes or standard Linux error code.

success

> + */
> +static int qmp_send_data(struct qmp_device *mdev, void *data)
> +{
> +	struct kvec *pkt = (struct kvec *)data;
> +	void __iomem *addr;
> +	unsigned long flags;
> +
> +	if (!mdev || !data || !completion_done(&mdev->ch_complete))
> +		return -EINVAL;
> +
> +	if (pkt->iov_len > QMP_MAX_PKT_SIZE) {
> +		dev_err(mdev->dev, "Unsupported packet size %lu\n", pkt->iov_len);
> +		return -EINVAL;
> +	}
> +
> +	spin_lock_irqsave(&mdev->tx_lock, flags);
> +	if (mdev->tx_sent) {
> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
> +		return -EAGAIN;
> +	}
> +
> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
> +		mdev->mcore.val, mdev->ucore.val);
> +
> +	addr = mdev->mcore_desc + QMP_CTRL_DATA_SIZE;
> +	memcpy_toio(addr, pkt->iov_base, pkt->iov_len);
> +
> +	mdev->mcore.bits.frag_size = pkt->iov_len;
> +	mdev->mcore.bits.rem_frag_count = 0;
> +
> +	dev_dbg(mdev->dev, "Copied buffer to mbox, sz: %d",
> +		mdev->mcore.bits.frag_size);
> +
> +	mdev->tx_sent = true;
> +	QMP_MCORE_CH_VAR_TOGGLE(mdev, tx);
> +	qmp_send_irq(mdev);

In all places qmp_send_irq is invoked under mutex of
"state_lock". But in qmp_send_data alone it seems to be invoked
under spin_lock of "tx_lock". While qmp_send_data itself is
called under mutex of tdev->lock. Hope that is not a potential
race.

> +	qmp_schedule_tx_timeout(mdev);
> +	spin_unlock_irqrestore(&mdev->tx_lock, flags);
> +
> +	return 0;
> +}
> +
> +/**
> + * tmel_qmp_shutdown() - Disconnect this mailbox channel so the client does not
> + *			 receive anymore data and can reliquish control
> + *			 of the channel.
> + * @chan:		 mailbox channel to be shutdown.
> + */
> +static void tmel_qmp_shutdown(struct mbox_chan *chan)
> +{
> +	struct qmp_device *mdev = chan->con_priv;
> +
> +	mutex_lock(&mdev->state_lock);
> +	if (mdev->local_state != LINK_DISCONNECTED) {
> +		mdev->local_state = LOCAL_DISCONNECTING;
> +		QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
> +		qmp_send_irq(mdev);
> +	}
> +	mutex_unlock(&mdev->state_lock);
> +}
> +
> +static void tmel_receive_message(void *message)
> +{
> +	struct tmel *tdev = tmeldev;
> +	struct kvec *pkt = NULL;
> +
> +	if (!message) {
> +		pr_err("spurious message received\n");
> +		goto tmel_receive_end;
> +	}
> +
> +	if (tdev->rx_done) {
> +		pr_err("tmel response pending\n");
> +		goto tmel_receive_end;
> +	}
> +
> +	pkt = (struct kvec *)message;
> +	tdev->pkt.iov_len = pkt->iov_len;
> +	tdev->pkt.iov_base = pkt->iov_base;
> +	tdev->rx_done = true;
> +
> +tmel_receive_end:
> +	wake_up_interruptible(&tdev->waitq);
> +}
> +
> +/**
> + * qmp_recv_data() -	received notification that data is available in the
> + *			mailbox. Copy data from mailbox and pass to client.
> + * @mbox:		mailbox device that received the notification.
> + * @mbox_of:		offset of mailbox after QMP Control data.
> + */
> +static void qmp_recv_data(struct qmp_device *mdev, u32 mbox_of)
> +{
> +	void __iomem *addr;
> +	struct kvec *pkt;
> +
> +	addr = mdev->ucore_desc + mbox_of;
> +	pkt = &mdev->rx_pkt;
> +	pkt->iov_len = mdev->ucore.bits.frag_size;
> +
> +	memcpy_fromio(pkt->iov_base, addr, pkt->iov_len);
> +	QMP_MCORE_CH_ACK_UPDATE(mdev, tx);
> +	dev_dbg(mdev->dev, "%s: Send RX data to TMEL Client", __func__);
> +	tmel_receive_message(pkt);
> +
> +	QMP_MCORE_CH_VAR_TOGGLE(mdev, rx_done);
> +	qmp_send_irq(mdev);
> +}
> +
> +/**
> + * clr_mcore_ch_state() - Clear the mcore state of a mailbox.
> + * @mdev:	mailbox device to be initialized.
> + */
> +static void clr_mcore_ch_state(struct qmp_device *mdev)
> +{
> +	QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, ch_state);
> +
> +	QMP_MCORE_CH_VAR_CLR(mdev, tx);
> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, tx);
> +
> +	QMP_MCORE_CH_VAR_CLR(mdev, rx_done);
> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, rx_done);
> +
> +	QMP_MCORE_CH_VAR_CLR(mdev, read_int);
> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, read_int);
> +
> +	mdev->mcore.bits.frag_size = 0;
> +	mdev->mcore.bits.rem_frag_count = 0;
> +}
> +
> +/**
> + * qmp_rx() - Handle incoming messages from remote processor.
> + * @mbox:	mailbox device that received notification.
> + */
> +static void qmp_rx(struct qmp_device *mdev)
> +{
> +	unsigned long flags;
> +
> +	/* read remote_desc from mailbox register */
> +	mdev->ucore.val = ioread32(mdev->ucore_desc);
> +
> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
> +		mdev->mcore.val, mdev->ucore.val);
> +
> +	mutex_lock(&mdev->state_lock);
> +
> +	/* Check if remote link down */
> +	if (mdev->local_state >= LINK_CONNECTED &&
> +	    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
> +		mdev->local_state = LINK_NEGOTIATION;
> +		QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
> +		qmp_send_irq(mdev);
> +		mutex_unlock(&mdev->state_lock);
> +		return;
> +	}
> +
> +	switch (mdev->local_state) {
> +	case LINK_DISCONNECTED:
> +		QMP_MCORE_CH_VAR_SET(mdev, link_state);
> +		mdev->local_state = LINK_NEGOTIATION;
> +		mdev->rx_pkt.iov_base = kzalloc(QMP_MAX_PKT_SIZE,
> +						GFP_KERNEL);
> +
> +		if (!mdev->rx_pkt.iov_base) {
> +			dev_err(mdev->dev, "rx pkt alloc failed");
> +			break;
> +		}
> +		dev_dbg(mdev->dev, "Set to link negotiation");
> +		qmp_send_irq(mdev);
> +
> +		break;
> +	case LINK_NEGOTIATION:
> +		if (!QMP_MCORE_CH_VAR_GET(mdev, link_state) ||
> +		    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
> +			dev_err(mdev->dev, "rx irq:link down state\n");
> +			break;
> +		}
> +
> +		clr_mcore_ch_state(mdev);
> +		QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
> +		mdev->local_state = LINK_CONNECTED;
> +		complete_all(&mdev->link_complete);
> +		dev_dbg(mdev->dev, "Set to link connected");
> +
> +		break;
> +	case LINK_CONNECTED:
> +		/* No need to handle until local opens */
> +		break;
> +	case LOCAL_CONNECTING:
> +		/* Ack to remote ch_state change */
> +		QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
> +
> +		mdev->local_state = CHANNEL_CONNECTED;
> +		complete_all(&mdev->ch_complete);
> +		dev_dbg(mdev->dev, "Set to channel connected");
> +		qmp_send_irq(mdev);
> +		break;
> +	case CHANNEL_CONNECTED:
> +		/* Check for remote channel down */
> +		if (!QMP_UCORE_CH_VAR_GET(mdev, ch_state)) {
> +			mdev->local_state = LOCAL_CONNECTING;
> +			QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
> +			dev_dbg(mdev->dev, "Remote Disconnect");
> +			qmp_send_irq(mdev);
> +		}
> +
> +		spin_lock_irqsave(&mdev->tx_lock, flags);
> +		/* Check TX done */
> +		if (mdev->tx_sent &&
> +		    QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, rx_done)) {
> +			/* Ack to remote */
> +			QMP_MCORE_CH_ACK_UPDATE(mdev, rx_done);
> +			mdev->tx_sent = false;
> +			cancel_delayed_work(&mdev->dwork);
> +			dev_dbg(mdev->dev, "TX flag cleared");
> +		}
> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
> +
> +		/* Check if remote is Transmitting */
> +		if (!QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, tx))
> +			break;
> +		if (mdev->ucore.bits.frag_size == 0 ||
> +		    mdev->ucore.bits.frag_size > QMP_MAX_PKT_SIZE) {
> +			dev_err(mdev->dev, "Rx frag size error %d\n",
> +				mdev->ucore.bits.frag_size);
> +			break;
> +		}
> +
> +		qmp_recv_data(mdev, QMP_CTRL_DATA_SIZE);
> +		break;
> +	case LOCAL_DISCONNECTING:
> +		if (!QMP_MCORE_CH_VAR_GET(mdev, ch_state)) {
> +			clr_mcore_ch_state(mdev);
> +			mdev->local_state = LINK_CONNECTED;
> +			dev_dbg(mdev->dev, "Channel closed");
> +			reinit_completion(&mdev->ch_complete);
> +		}
> +
> +		break;
> +	default:
> +		dev_err(mdev->dev, "Local Channel State corrupted\n");
> +	}
> +	mutex_unlock(&mdev->state_lock);
> +}
> +
> +static irqreturn_t qmp_irq_handler(int irq, void *priv)
> +{
> +	struct qmp_device *mdev = (struct qmp_device *)priv;
> +
> +	qmp_rx(mdev);
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static int tmel_qmp_parse_devicetree(struct platform_device *pdev,
> +				     struct qmp_device *mdev)
> +{
> +	struct device *dev = &pdev->dev;
> +
> +	mdev->mcore_desc = devm_platform_ioremap_resource(pdev, 0);
> +	if (!mdev->mcore_desc) {
> +		dev_err(dev, "ioremap failed for mcore reg\n");
> +		return -EIO;
> +	}
> +
> +	mdev->ucore_desc = mdev->mcore_desc + QMP_UCORE_DESC_OFFSET;
> +
> +	mdev->mbox_client.dev = dev;
> +	mdev->mbox_client.knows_txdone = false;
> +	mdev->mbox_chan = mbox_request_channel(&mdev->mbox_client, 0);
> +	if (IS_ERR(mdev->mbox_chan)) {
> +		dev_err(dev, "mbox chan for IPC is missing\n");
> +		return PTR_ERR(mdev->mbox_chan);
> +	}
> +
> +	return 0;
> +}
> +
> +static void tmel_qmp_remove(struct platform_device *pdev)
> +{
> +	struct qmp_device *mdev = platform_get_drvdata(pdev);
> +
> +	mbox_controller_unregister(&mdev->ctrl);
> +	kfree(mdev->rx_pkt.iov_base);
> +}
> +
> +static struct device *tmel_get_device(void)
> +{
> +	struct tmel *tdev = tmeldev;
> +
> +	if (!tdev)
> +		return NULL;
> +
> +	return tdev->dev;
> +}
> +
> +static int tmel_prepare_msg(struct tmel *tdev, u32 msg_uid,
> +			    void *msg_buf, size_t msg_size)
> +{
> +	struct tmel_ipc_pkt *ipc_pkt = tdev->ipc_pkt;
> +	struct ipc_header *msg_hdr = &ipc_pkt->msg_hdr;
> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
> +	struct sram_payload *sram_payload = &ipc_pkt->payload.sram_payload;
> +	int ret;
> +
> +	memset(ipc_pkt, 0, sizeof(struct tmel_ipc_pkt));
> +
> +	msg_hdr->msg_type = TMEL_MSG_UID_MSG_TYPE(msg_uid);
> +	msg_hdr->action_id = TMEL_MSG_UID_ACTION_ID(msg_uid);
> +
> +	pr_debug("uid: %d, msg_size: %zu msg_type:%d, action_id:%d\n",
> +		 msg_uid, msg_size, msg_hdr->msg_type, msg_hdr->action_id);
> +
> +	if (sizeof(struct ipc_header) + msg_size <= MBOX_IPC_PACKET_SIZE) {
> +		/* Mbox only */
> +		msg_hdr->ipc_type = IPC_MBOX_ONLY;
> +		msg_hdr->msg_len = msg_size;
> +		memcpy((void *)mbox_payload, msg_buf, msg_size);
> +	} else if (msg_size <= SRAM_IPC_MAX_BUF_SIZE) {
> +		/* SRAM */
> +		msg_hdr->ipc_type = IPC_MBOX_SRAM;
> +		msg_hdr->msg_len = 8;
> +
> +		tdev->sram_dma_addr = dma_map_single(tdev->dev, msg_buf,
> +						     msg_size,
> +						     DMA_BIDIRECTIONAL);
> +		ret = dma_mapping_error(tdev->dev, tdev->sram_dma_addr);
> +		if (ret != 0) {

if (ret)

> +			pr_err("SRAM DMA mapping error: %d\n", ret);
> +			return ret;
> +		}
> +
> +		sram_payload->payload_ptr = tdev->sram_dma_addr;
> +		sram_payload->payload_len = msg_size;
> +	} else {
> +		pr_err("Invalid payload length: %zu\n", msg_size);

return error??

> +	}
> +
> +	return 0;
> +}
> +
> +static void tmel_unprepare_message(struct tmel *tdev,
> +				   void *msg_buf, size_t msg_size)
> +{
> +	struct tmel_ipc_pkt *ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
> +
> +	if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_ONLY) {
> +		memcpy(msg_buf, (void *)mbox_payload, msg_size);
> +	} else if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_SRAM) {
> +		dma_unmap_single(tdev->dev, tdev->sram_dma_addr, msg_size,
> +				 DMA_BIDIRECTIONAL);
> +		tdev->sram_dma_addr = 0;
> +	}
> +}
> +
> +static bool tmel_rx_done(struct tmel *tdev)
> +{
> +	return tdev->rx_done;
> +}
> +
> +static int tmel_process_request(u32 msg_uid, void *msg_buf,
> +				size_t msg_size)
> +{
> +	struct tmel *tdev = tmeldev;
> +	unsigned long jiffies;
> +	struct tmel_ipc_pkt *resp_ipc_pkt;
> +	long time_left = 0;
> +	int ret = 0;
> +
> +	/*
> +	 * Check to handle if probe is not successful or not completed yet
> +	 */
> +	if (!tdev) {
> +		pr_err("tmel dev is NULL\n");
> +		return -ENODEV;
> +	}
> +
> +	if (!msg_buf || !msg_size) {
> +		pr_err("Invalid msg_buf or msg_size\n");
> +		return -EINVAL;
> +	}
> +
> +	mutex_lock(&tdev->lock);
> +	tdev->rx_done = false;
> +
> +	ret = tmel_prepare_msg(tdev, msg_uid, msg_buf, msg_size);
> +	if (ret)

mutex_unlock?

> +		return ret;
> +
> +	tdev->pkt.iov_len = sizeof(struct tmel_ipc_pkt);
> +	tdev->pkt.iov_base = (void *)tdev->ipc_pkt;
> +
> +	qmp_send_data(tdev->mdev, &tdev->pkt);
> +	jiffies = msecs_to_jiffies(30000);

#define for 30000

> +
> +	time_left = wait_event_interruptible_timeout(tdev->waitq,
> +						     tmel_rx_done(tdev),
> +						     jiffies);
> +
> +	if (!time_left) {
> +		pr_err("Request timed out\n");
> +		ret = -ETIMEDOUT;
> +		goto err_exit;
> +	}
> +
> +	if (tdev->pkt.iov_len != sizeof(struct tmel_ipc_pkt)) {
> +		pr_err("Invalid pkt.size received size: %lu, expected: %zu\n",
> +		       tdev->pkt.iov_len, sizeof(struct tmel_ipc_pkt));
> +		ret = -EPROTO;
> +		goto err_exit;
> +	}
> +
> +	resp_ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
> +	tmel_unprepare_message(tdev, msg_buf, msg_size);
> +	tdev->rx_done = false;
> +	ret = resp_ipc_pkt->msg_hdr.response;
> +
> +err_exit:
> +	mutex_unlock(&tdev->lock);
> +	return ret;
> +}
> +
> +static int tmel_secboot_sec_auth(u32 sw_id, void *metadata, size_t size)
> +{
> +	struct device *dev = tmel_get_device();
> +	struct tmel_secboot_sec_auth *msg;
> +	dma_addr_t elf_buf_phys;
> +	void *elf_buf;
> +	int ret;
> +
> +	if (!dev || !metadata)
> +		return -EINVAL;
> +
> +	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
> +
> +	elf_buf = dma_alloc_coherent(dev, size, &elf_buf_phys, GFP_KERNEL);
> +	if (!elf_buf)

kfree(msg)

> +		return -ENOMEM;
> +
> +	memcpy(elf_buf, metadata, size);
> +
> +	msg->req.sw_id = sw_id;
> +	msg->req.elf_buf.buf = (u32)elf_buf_phys;
> +	msg->req.elf_buf.buf_len = (u32)size;
> +
> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SEC_AUTH, msg,
> +				   sizeof(struct tmel_secboot_sec_auth));
> +	if (ret) {
> +		pr_err("Failed to send IPC: %d\n", ret);
> +	} else if (msg->resp.status || msg->resp.extended_error) {
> +		pr_err("Failed with status: %d error: %d\n",
> +		       msg->resp.status, msg->resp.extended_error);
> +		ret = msg->resp.status;

If resp.status == 0 and resp.extended_error != 0, then the
function will return 0 (i.e. success). Is that correct?

> +	}
> +
> +	kfree(msg);
> +	dma_free_coherent(dev, size, elf_buf, elf_buf_phys);
> +
> +	return ret;
> +}
> +
> +static int tmel_secboot_teardown(u32 sw_id, u32 secondary_sw_id)
> +{
> +	struct device *dev = tmel_get_device();
> +	struct tmel_secboot_teardown msg = {0};
> +	int ret;
> +
> +	if (!dev)
> +		return -EINVAL;
> +
> +	msg.req.sw_id = sw_id;
> +	msg.req.secondary_sw_id = secondary_sw_id;
> +	msg.resp.status = TMEL_ERROR_GENERIC;
> +
> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN, &msg,
> +				   sizeof(msg));
> +	if (ret) {
> +		pr_err("Failed to send IPC: %d\n", ret);
> +	} else if (msg.resp.status) {
> +		pr_err("Failed with status: %d\n", msg.resp.status);
> +		ret = msg.resp.status;
> +	}
> +
> +	return ret;
> +}
> +
> +static int tmel_init(struct qmp_device *mdev)
> +{
> +	struct tmel *tdev;
> +
> +	tdev = devm_kzalloc(mdev->dev, sizeof(*tdev), GFP_KERNEL);
> +	if (!tdev)
> +		return -ENOMEM;
> +
> +	mutex_init(&tdev->lock);
> +
> +	tdev->ipc_pkt = devm_kzalloc(mdev->dev, sizeof(struct tmel_ipc_pkt),
> +				     GFP_KERNEL);
> +	if (!tdev->ipc_pkt)
> +		return -ENOMEM;
> +
> +	init_waitqueue_head(&tdev->waitq);
> +
> +	tdev->rx_done = false;
> +	tdev->dev = mdev->dev;
> +
> +	tmeldev = tdev;
> +	tmeldev->mdev = mdev;
> +
> +	return 0;
> +}
> +
> +static int tmel_qmp_send(struct mbox_chan *chan, void *data)
> +{
> +	struct qmp_device *mdev = chan->con_priv;
> +
> +	mdev->qwork.data =  data;
> +
> +	queue_work(system_wq, &mdev->qwork.work);
> +
> +	return 0;
> +}
> +
> +static void tmel_qmp_send_work(struct work_struct *work)
> +{
> +	struct qmp_work *qwork = container_of(work, struct qmp_work, work);
> +	struct qmp_device *mdev = tmeldev->mdev;
> +	struct mbox_chan *chan = &mdev->ctrl.chans[0];
> +
> +	struct tmel_qmp_msg *tmsg = qwork->data;
> +	struct tmel_sec_auth *smsg = tmsg->msg;
> +	int ret;

'ret' is unused. Can be removed?

> +
> +	switch (tmsg->msg_id) {
> +	case TMEL_MSG_UID_SECBOOT_SEC_AUTH:
> +		ret = tmel_secboot_sec_auth(smsg->pas_id,
> +					    smsg->data,
> +					    smsg->size);
> +		break;
> +	case TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN:
> +		ret = tmel_secboot_teardown(smsg->pas_id, 0);
> +		break;
> +	}
> +
> +	mbox_chan_txdone(chan, 0);
> +}
> +
> +/**
> + * tmel_qmp_mbox_of_xlate() - Returns a mailbox channel to be used for this mailbox
> + *		      device. Make sure the channel is not already in use.
> + * @mbox:       Mailbox device controlls the requested channel.
> + * @spec:       Device tree arguments to specify which channel is requested.
> + */
> +static struct mbox_chan *tmel_qmp_mbox_of_xlate(struct mbox_controller *mbox,
> +						const struct of_phandle_args *spec)
> +{
> +	struct qmp_device *mdev = dev_get_drvdata(mbox->dev);
> +	unsigned int channel = spec->args[0];
> +
> +	if (!mdev)
> +		return ERR_PTR(-EPROBE_DEFER);
> +
> +	if (channel >= mbox->num_chans)
> +		return ERR_PTR(-EINVAL);
> +
> +	mutex_lock(&mdev->state_lock);
> +	if (mdev->ch_in_use) {
> +		dev_err(mdev->dev, "mbox channel already in use\n");
> +		mutex_unlock(&mdev->state_lock);
> +		return ERR_PTR(-EBUSY);
> +	}
> +	mdev->ch_in_use = true;
> +	mutex_unlock(&mdev->state_lock);
> +
> +	return &mbox->chans[0];
> +}
> +
> +static struct mbox_chan_ops tmel_qmp_ops = {
> +	.startup = tmel_qmp_startup,
> +	.shutdown = tmel_qmp_shutdown,
> +	.send_data = tmel_qmp_send,
> +};
> +
> +static int tmel_qmp_probe(struct platform_device *pdev)
> +{
> +	struct device_node *node = pdev->dev.of_node;
> +	struct mbox_chan *chans;
> +	struct qmp_device *mdev;
> +	int ret = 0;
> +
> +	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
> +	if (!mdev)
> +		return -ENOMEM;
> +
> +	platform_set_drvdata(pdev, mdev);
> +
> +	ret = tmel_qmp_parse_devicetree(pdev, mdev);
> +	if (ret)
> +		return ret;
> +
> +	mdev->dev = &pdev->dev;
> +
> +	chans = devm_kzalloc(mdev->dev,
> +			     sizeof(*chans) * QMP_NUM_CHANS, GFP_KERNEL);
> +	if (!chans)
> +		return -ENOMEM;
> +
> +	INIT_WORK(&mdev->qwork.work, tmel_qmp_send_work);
> +
> +	mdev->ctrl.dev = &pdev->dev;
> +	mdev->ctrl.ops = &tmel_qmp_ops;
> +	mdev->ctrl.chans = chans;
> +	chans[0].con_priv = mdev;
> +	mdev->ctrl.num_chans = QMP_NUM_CHANS;
> +	mdev->ctrl.txdone_irq = true;
> +	mdev->ctrl.of_xlate = tmel_qmp_mbox_of_xlate;
> +
> +	ret = mbox_controller_register(&mdev->ctrl);
> +	if (ret) {
> +		dev_err(mdev->dev, "failed to register mbox controller\n");
> +		return ret;
> +	}
> +
> +	spin_lock_init(&mdev->tx_lock);
> +	mutex_init(&mdev->state_lock);
> +	mdev->local_state = LINK_DISCONNECTED;
> +	init_completion(&mdev->link_complete);
> +	init_completion(&mdev->ch_complete);
> +
> +	INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout);
> +
> +	ret = platform_get_irq(pdev, 0);
> +
> +	ret = devm_request_threaded_irq(mdev->dev, ret,
> +					NULL, qmp_irq_handler,
> +					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
> +					node->name, (void *)mdev);
> +	if (ret < 0) {
> +		dev_err(mdev->dev, "request threaded irq failed, ret %d\n",
> +			ret);
> +
> +		tmel_qmp_remove(pdev);
> +		return ret;
> +	}
> +
> +	/* Receive any outstanding initial data */
> +	tmel_init(mdev);
> +	qmp_rx(mdev);
> +
> +	return 0;
> +}
> +
> +static const struct of_device_id tmel_qmp_dt_match[] = {
> +	{ .compatible = "qcom,ipq5424-tmel-qmp" },
> +	{},
> +};
> +
> +static struct platform_driver tmel_qmp_driver = {
> +	.driver = {
> +		.name = "tmel_qmp_mbox",
> +		.of_match_table = tmel_qmp_dt_match,
> +	},
> +	.probe = tmel_qmp_probe,
> +	.remove = tmel_qmp_remove,
> +};
> +module_platform_driver(tmel_qmp_driver);
> +
> +MODULE_DESCRIPTION("QCOM TMEL QMP DRIVER");
> +MODULE_LICENSE("GPL");
> diff --git a/include/linux/mailbox/tmelcom-qmp.h b/include/linux/mailbox/tmelcom-qmp.h
> new file mode 100644
> index 000000000000..9fa450eaf736
> --- /dev/null
> +++ b/include/linux/mailbox/tmelcom-qmp.h
> @@ -0,0 +1,157 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.

Might have to update year.

> + */
> +#ifndef _TMELCOM_H_
> +#define _TMELCOM_H_
> +
> +/*----------------------------------------------------------------------------
> + * Documentation
> + * --------------------------------------------------------------------------
> + */
> +
> +/*
> + * TMEL Messages Unique Identifiers bit layout
> +    _____________________________________
> +   |	   |	    |	   |

Alignment.

> +   | 31------16| 15-------8 | 7-------0 |
> +   | Reserved  |messageType | actionID  |
> +   |___________|____________|___________|
> +	       \___________  ___________/
> +			   \/
> +		      TMEL_MSG_UID
> +*/
> +
> +/*
> + * TMEL Messages Unique Identifiers Parameter ID bit layout
> +_________________________________________________________________________________________
> +|     |     |     |     |     |     |     |     |     |     |     |    |    |    |       |
> +|31-30|29-28|27-26|25-24|23-22|21-20|19-18|17-16|15-14|13-12|11-10|9--8|7--6|5--4|3-----0|
> +| p14 | p13 | p12 | p11 | p10 | p9  | p8  | p7  | p6  | p5  | p4  | p3 | p2 | p1 | nargs |
> +|type |type |type |type |type |type |type |type |type |type |type |type|type|type|       |
> +|_____|_____|_____|_____|_____|_____|_____|_____|_____|_____|_____|____|____|____|_______|
> +
> +*/
> +
> +/*
> + * Macro used to define unique TMEL Message Identifier based on
> + * message type and action identifier.
> + */
> +#define TMEL_MSG_UID_CREATE(m, a)	((u32)(((m & 0xff) << 8) | (a & 0xff)))
> +
> +/** Helper macro to extract the messageType from TMEL_MSG_UID. */
> +#define TMEL_MSG_UID_MSG_TYPE(v)	((v & GENMASK(15, 8)) >> 8)
> +
> +/** Helper macro to extract the actionID from TMEL_MSG_UID. */
> +#define TMEL_MSG_UID_ACTION_ID(v)	(v & GENMASK(7, 0))
> +
> +/****************************************************************************
> + *
> + * All definitions of supported messageType's.

No apostrophe.

> + *
> + * 0x00 -> 0xF0 messageType used for production use cases.
> + * 0xF1 -> 0xFF messageType reserved(can be used for test puprposes).
> + *
> + * <Template> : TMEL_MSG_<MSGTYPE_NAME>
> + * **************************************************************************/
> +#define TMEL_MSG_SECBOOT		 0x00
> +
> +/****************************************************************************
> + *
> + * All definitions of action ID's per messageType.

No apostrophe.

> + *
> + * 0x00 -> 0xBF actionID used for production use cases.
> + * 0xC0 -> 0xFF messageType must be reserved for test use cases.
> + *
> + * NOTE: Test ID's shouldn't appear in this file.

No apostrophe in ID's.

> + *
> + * <Template> : TMEL_ACTION_<MSGTYPE_NAME>_<ACTIONID_NAME>
> + * **************************************************************************/
> +
> +/*
> + * ----------------------------------------------------------------------------
> +		Action ID's for TMEL_MSG_SECBOOT
> + * ------------------------------------------------------------------------
> + */
> +#define TMEL_ACTION_SECBOOT_SEC_AUTH		     0x04
> +#define TMEL_ACTION_SECBOOT_SS_TEAR_DOWN	     0x0A

Uppercase hex.

> +
> +/****************************************************************************
> + *
> + * All definitions of TMEL Message UID's (messageType | actionID).

No apostrophe.

> + *
> + * <Template> : TMEL_MSG_UID_<MSGTYPE_NAME>_<ACTIONID_NAME>
> + * *************************************************************************/
> +
> +/*----------------------------------------------------------------------------
> + * UID's for TMEL_MSG_SECBOOT
> + *-------------------------------------------------------------------------
> + */
> +#define TMEL_MSG_UID_SECBOOT_SEC_AUTH	    TMEL_MSG_UID_CREATE(TMEL_MSG_SECBOOT,\
> +					    TMEL_ACTION_SECBOOT_SEC_AUTH)
> +
> +#define TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN	TMEL_MSG_UID_CREATE(TMEL_MSG_SECBOOT,\
> +						TMEL_ACTION_SECBOOT_SS_TEAR_DOWN)
> +
> +#define HW_MBOX_SIZE			32
> +#define MBOX_QMP_CTRL_DATA_SIZE		4
> +#define MBOX_RSV_SIZE			4
> +#define MBOX_IPC_PACKET_SIZE		(HW_MBOX_SIZE - MBOX_QMP_CTRL_DATA_SIZE - MBOX_RSV_SIZE)
> +#define MBOX_IPC_MAX_PARAMS		5
> +
> +#define MAX_PARAM_IN_PARAM_ID		14
> +#define PARAM_CNT_FOR_PARAM_TYPE_OUTBUF	3
> +#define SRAM_IPC_MAX_PARAMS		(MAX_PARAM_IN_PARAM_ID * PARAM_CNT_FOR_PARAM_TYPE_OUTBUF)
> +#define SRAM_IPC_MAX_BUF_SIZE		(SRAM_IPC_MAX_PARAMS * sizeof(u32))
> +
> +#define TMEL_ERROR_GENERIC		(0x1U)
> +#define TMEL_ERROR_NOT_SUPPORTED	(0x2U)
> +#define TMEL_ERROR_BAD_PARAMETER	(0x3U)
> +#define TMEL_ERROR_BAD_MESSAGE		(0x4U)
> +#define TMEL_ERROR_BAD_ADDRESS		(0x5U)
> +#define TMEL_ERROR_TMELCOM_FAILURE	(0x6U)
> +#define TMEL_ERROR_TMEL_BUSY		(0x7U)


Please use a lowercase 'u' suffix for these unsigned constants.

> +
> +enum ipc_type {
> +	IPC_MBOX_ONLY,
> +	IPC_MBOX_SRAM,
> +};
> +
> +struct ipc_header {
> +	u8 ipc_type:1;
> +	u8 msg_len:7;
> +	u8 msg_type;
> +	u8 action_id;
> +	s8 response;
> +} __packed;
> +
> +struct mbox_payload {
> +	u32 param[MBOX_IPC_MAX_PARAMS];
> +};
> +
> +struct sram_payload {
> +	u32 payload_ptr;
> +	u32 payload_len;
> +};
> +
> +union ipc_payload {
> +	struct mbox_payload mbox_payload;
> +	struct sram_payload sram_payload;
> +} __packed;
> +
> +struct tmel_ipc_pkt {
> +	struct ipc_header msg_hdr;
> +	union ipc_payload payload;
> +} __packed;
> +
> +struct tmel_qmp_msg {
> +	void *msg;
> +	u32 msg_id;
> +};
> +
> +struct tmel_sec_auth {
> +	void *data;
> +	u32 size;
> +	u32 pas_id;
> +};
> +#endif  /*_TMELCOM_H_ */

Stray space after endif.
Add space after '/*'.

-Varada
Krzysztof Kozlowski Dec. 31, 2024, 8:06 a.m. UTC | #2
On 31/12/2024 06:49, Sricharan R wrote:
> From: Sricharan Ramabadhran <quic_srichara@quicinc.com>
> 
> This mailbox facilitates the communication between the TME-L server based
> subsystems (Q6) and the TME-L client (APPSS/BTSS/AUDIOSS), used for security

<form letter>
This is a friendly reminder during the review process.

It seems my or other reviewer's previous comments were not fully
addressed. Maybe the feedback got lost between the quotes, maybe you
just forgot to apply it. Please go back to the previous discussion and
either implement all requested changes or keep discussing them.

Thank you.
</form letter>

Still wrong wrapping.

> services like secure image authentication, enable/disable efuses, crypto
> services. Each client in the   SoC has its own block of message RAM and IRQ
> for communication with the TME-L SS. The protocol used to communicate in the
> message RAM is known as Qualcomm Messaging Protocol (QMP).
> 
> Remote proc driver subscribes to this mailbox and uses the mbox_send_message
> to use TME-L to securely authenticate/teardown the images.
> 
> Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
> ---
>   [v2] Added worker for mailbox tx processing, since some of the operations can sleep
>        Fixed checkpatch warnings. Some [CHECK] like below still exist, but that looks
>        like a false postive.
> 
>        CHECK: Macro argument 'm' may be better as '(m)' to avoid precedence issues
>         #1072: FILE: include/linux/mailbox/tmelcom-qmp.h:40:
>         +#define TMEL_MSG_UID_CREATE(m, a)      ((u32)(((m & 0xff) << 8) | (a & 0xff)))
> 
>  drivers/mailbox/Kconfig             |   7 +
>  drivers/mailbox/Makefile            |   2 +
>  drivers/mailbox/qcom-tmel-qmp.c     | 971 ++++++++++++++++++++++++++++
>  include/linux/mailbox/tmelcom-qmp.h | 157 +++++
>  4 files changed, 1137 insertions(+)
>  create mode 100644 drivers/mailbox/qcom-tmel-qmp.c
>  create mode 100644 include/linux/mailbox/tmelcom-qmp.h
> 
> diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
> index 8ecba7fb999e..8ad0b834d617 100644
> --- a/drivers/mailbox/Kconfig
> +++ b/drivers/mailbox/Kconfig
> @@ -306,4 +306,11 @@ config THEAD_TH1520_MBOX
>  	  kernel is running, and E902 core used for power management among other
>  	  things.
>  
> +config QCOM_TMEL_QMP_MAILBOX

Did you just place it at the end instead of keeping the entries grouped and sorted?

> +	tristate "QCOM Mailbox Protocol(QMP) for TME-L SS"
> +	help
> +	  Say yes to add support for the QMP Mailbox Protocol driver for TME-L.
> +	  QMP is a lightweight communication protocol for sending messages to
> +	  TME-L. This protocol fits into the Generic Mailbox Framework.
> +	  QMP uses a mailbox registers.
>  endif
> diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
> index 5f4f5b0ce2cc..4dba283a94ad 100644
> --- a/drivers/mailbox/Makefile
> +++ b/drivers/mailbox/Makefile
> @@ -66,3 +66,5 @@ obj-$(CONFIG_QCOM_CPUCP_MBOX)	+= qcom-cpucp-mbox.o
>  obj-$(CONFIG_QCOM_IPCC)		+= qcom-ipcc.o
>  
>  obj-$(CONFIG_THEAD_TH1520_MBOX)	+= mailbox-th1520.o
> +
> +obj-$(CONFIG_QCOM_TMEL_QMP_MAILBOX) += qcom-tmel-qmp.o

Same problem.

> diff --git a/drivers/mailbox/qcom-tmel-qmp.c b/drivers/mailbox/qcom-tmel-qmp.c
> new file mode 100644
> index 000000000000..6de0a418e0ae
> --- /dev/null
> +++ b/drivers/mailbox/qcom-tmel-qmp.c
> @@ -0,0 +1,971 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
> + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
> + */
> +
> +#include <linux/completion.h>
> +#include <linux/delay.h>
> +#include <linux/dma-direction.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/kernel.h>
> +#include <linux/kthread.h>
> +#include <linux/mailbox_client.h>
> +#include <linux/mailbox_controller.h>
> +#include <linux/mailbox/tmelcom-qmp.h>
> +#include <linux/module.h>
> +#include <linux/of.h>
> +#include <linux/of_irq.h>

Not used

> +#include <linux/of_platform.h>

Looks also not used

> +#include <linux/platform_device.h>
> +#include <linux/spinlock.h>
> +#include <linux/types.h>
> +#include <linux/uaccess.h>
> +#include <linux/uio.h>
> +#include <linux/workqueue.h>


Several headers here look unused.

> +
> +#define QMP_NUM_CHANS	0x1
> +#define QMP_TOUT_MS	1000
> +#define MBOX_ALIGN_BYTES	3
> +#define QMP_CTRL_DATA_SIZE	4
> +#define QMP_MAX_PKT_SIZE	0x18
> +#define QMP_UCORE_DESC_OFFSET	0x1000
> +
> +#define QMP_CH_VAR_GET(mdev, desc, var) ((mdev)->desc.bits.var)
> +#define QMP_CH_VAR_SET(mdev, desc, var) (mdev)->desc.bits.var = 1
> +#define QMP_CH_VAR_CLR(mdev, desc, var) (mdev)->desc.bits.var = 0
> +
> +#define QMP_MCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, mcore, var)
> +#define QMP_MCORE_CH_VAR_SET(mdev, var)	QMP_CH_VAR_SET(mdev, mcore, var)
> +#define QMP_MCORE_CH_VAR_CLR(mdev, var)	QMP_CH_VAR_CLR(mdev, mcore, var)
> +
> +#define QMP_MCORE_CH_VAR_TOGGLE(mdev, var) \
> +	(mdev)->mcore.bits.var = !((mdev)->mcore.bits.var)
> +#define QMP_MCORE_CH_ACKED_CHECK(mdev, var) \
> +	((mdev)->ucore.bits.var == (mdev)->mcore.bits.var##_ack)
> +#define QMP_MCORE_CH_ACK_UPDATE(mdev, var) \
> +	(mdev)->mcore.bits.var##_ack = (mdev)->ucore.bits.var
> +#define QMP_MCORE_CH_VAR_ACK_CLR(mdev, var) \
> +	(mdev)->mcore.bits.var##_ack = 0
> +
> +#define QMP_UCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, ucore, var)
> +#define QMP_UCORE_CH_ACKED_CHECK(mdev, var) \
> +	((mdev)->mcore.bits.var == (mdev)->ucore.bits.var##_ack)
> +#define QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, var) \
> +	((mdev)->ucore.bits.var != (mdev)->mcore.bits.var##_ack)
> +
> +/**
> + * enum qmp_local_state -	definition of the local state machine
> + * @LINK_DISCONNECTED:		Init state, waiting for ucore to start
> + * @LINK_NEGOTIATION:		Set local link state to up, wait for ucore ack
> + * @LINK_CONNECTED:		Link state up, channel not connected
> + * @LOCAL_CONNECTING:		Channel opening locally, wait for ucore ack
> + * @CHANNEL_CONNECTED:		Channel fully opened
> + * @LOCAL_DISCONNECTING:	Channel closing locally, wait for ucore ack
> + */
> +enum qmp_local_state {
> +	LINK_DISCONNECTED,
> +	LINK_NEGOTIATION,
> +	LINK_CONNECTED,
> +	LOCAL_CONNECTING,
> +	CHANNEL_CONNECTED,
> +	LOCAL_DISCONNECTING,
> +};
> +
> +union channel_desc {
> +	struct {
> +		u32 link_state:1;
> +		u32 link_state_ack:1;
> +		u32 ch_state:1;
> +		u32 ch_state_ack:1;
> +		u32 tx:1;
> +		u32 tx_ack:1;
> +		u32 rx_done:1;
> +		u32 rx_done_ack:1;
> +		u32 read_int:1;
> +		u32 read_int_ack:1;
> +		u32 reserved:6;
> +		u32 frag_size:8;
> +		u32 rem_frag_count:8;
> +	} bits;
> +	unsigned int val;
> +};
> +
> +struct qmp_work {
> +	struct work_struct work;
> +	void *data;
> +};
> +
> +/**
> + * struct qmp_device - local information for managing a single mailbox
> + * @dev:	    The device that corresponds to this mailbox
> + * @ctrl:	    The mbox controller for this mailbox
> + * @mcore_desc:	    Local core (APSS) mailbox descriptor
> + * @ucore_desc:	    Remote core (TME-L) mailbox descriptor
> + * @mcore:	    Local core (APSS) channel descriptor
> + * @ucore:	    Remote core (TME-L) channel descriptor
> + * @rx_pkt:	    Buffer to pass to client, holds received data from mailbox
> + * @tx_pkt:	    Buffer from client, holds data to send on mailbox
> + * @mbox_client:    Mailbox client for the IPC interrupt
> + * @mbox_chan:	    Mailbox client chan for the IPC interrupt
> + * @local_state:    Current state of mailbox protocol
> + * @state_lock:	    Serialize mailbox state changes
> + * @tx_lock:	    Serialize access for writes to mailbox
> + * @link_complete:  Use to block until link negotiation with remote proc
> + * @ch_complete:    Use to block until the channel is fully opened
> + * @dwork:	    Delayed work to detect timed out tx
> + * @tx_sent:	    True if tx is sent and remote proc has not sent ack
> + */
> +struct qmp_device {
> +	struct device *dev;
> +	struct mbox_controller ctrl;
> +	struct qmp_work qwork;
> +
> +	void __iomem *mcore_desc;
> +	void __iomem *ucore_desc;
> +	union channel_desc mcore;
> +	union channel_desc ucore;
> +
> +	struct kvec rx_pkt;
> +	struct kvec tx_pkt;
> +
> +	struct mbox_client mbox_client;
> +	struct mbox_chan *mbox_chan;
> +
> +	enum qmp_local_state local_state;
> +
> +	/* Lock for QMP link state changes */

Vague

> +	struct mutex state_lock;
> +	/* Lock to serialize access to mailbox */

No, I don't see serialized access to mailbox. I see some parts of access
being protected. Write descriptive lock descriptions.

> +	spinlock_t tx_lock;
> +
> +	struct completion link_complete;
> +	struct completion ch_complete;
> +	struct delayed_work dwork;
> +	void *data;
> +
> +	bool tx_sent;
> +	bool ch_in_use;
> +};
> +
> +struct tmel_msg_param_type_buf_in {
> +	u32 buf;
> +	u32 buf_len;
> +};
> +
> +struct tmel_secboot_sec_auth_req {
> +	u32 sw_id;
> +	struct tmel_msg_param_type_buf_in elf_buf;
> +	struct tmel_msg_param_type_buf_in region_list;
> +	u32 relocate;
> +} __packed;
> +
> +struct tmel_secboot_sec_auth_resp {
> +	u32 first_seg_addr;
> +	u32 first_seg_len;
> +	u32 entry_addr;
> +	u32 extended_error;
> +	u32 status;
> +} __packed;
> +
> +struct tmel_secboot_sec_auth {
> +	struct tmel_secboot_sec_auth_req req;
> +	struct tmel_secboot_sec_auth_resp resp;
> +} __packed;
> +
> +struct tmel_secboot_teardown_req {
> +	u32 sw_id;
> +	u32 secondary_sw_id;
> +} __packed;
> +
> +struct tmel_secboot_teardown_resp {
> +	u32 status;
> +} __packed;
> +
> +struct tmel_secboot_teardown {
> +	struct tmel_secboot_teardown_req req;
> +	struct tmel_secboot_teardown_resp resp;
> +} __packed;
> +
> +struct tmel {
> +	struct device *dev;
> +	struct qmp_device *mdev;
> +	struct kvec pkt;
> +	/* To serialize incoming tmel request */

No, explain what is exactly protected. We all know that mutex serializes...

> +	struct mutex lock;
> +	struct tmel_ipc_pkt *ipc_pkt;
> +	dma_addr_t sram_dma_addr;
> +	wait_queue_head_t waitq;
> +	bool rx_done;
> +};
> +
> +static struct tmel *tmeldev;

NAK. First: it is not needed. Second: even if this spaghetti code did need it, the answer would still be to drop it and fix your code so it does not create fake singletons.

> +
> +/**
> + * qmp_send_irq() - send an irq to a remote entity as an event signal.
> + * @mdev:       Which remote entity that should receive the irq.
> + */
> +static void qmp_send_irq(struct qmp_device *mdev)
> +{
> +	/* Update the mcore val to mcore register */
> +	iowrite32(mdev->mcore.val, mdev->mcore_desc);
> +	/* Ensure desc update is visible before IPC */
> +	wmb();
> +
> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
> +		mdev->mcore.val, mdev->ucore.val);
> +
> +	mbox_send_message(mdev->mbox_chan, NULL);
> +	mbox_client_txdone(mdev->mbox_chan, 0);
> +}
> +

...

> +static irqreturn_t qmp_irq_handler(int irq, void *priv)
> +{
> +	struct qmp_device *mdev = (struct qmp_device *)priv;
> +
> +	qmp_rx(mdev);
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static int tmel_qmp_parse_devicetree(struct platform_device *pdev,
> +				     struct qmp_device *mdev)


Probe helper functions always go next to each other, not in another part of the compilation unit.

> +{
> +	struct device *dev = &pdev->dev;
> +
> +	mdev->mcore_desc = devm_platform_ioremap_resource(pdev, 0);
> +	if (!mdev->mcore_desc) {
> +		dev_err(dev, "ioremap failed for mcore reg\n");
> +		return -EIO;
> +	}
> +
> +	mdev->ucore_desc = mdev->mcore_desc + QMP_UCORE_DESC_OFFSET;
> +
> +	mdev->mbox_client.dev = dev;
> +	mdev->mbox_client.knows_txdone = false;
> +	mdev->mbox_chan = mbox_request_channel(&mdev->mbox_client, 0);
> +	if (IS_ERR(mdev->mbox_chan)) {
> +		dev_err(dev, "mbox chan for IPC is missing\n");

Syntax is: return dev_err_probe().

> +		return PTR_ERR(mdev->mbox_chan);
> +	}
> +
> +	return 0;
> +}
> +
> +static void tmel_qmp_remove(struct platform_device *pdev)

Again, why is the remove callback not placed next to probe?

> +{
> +	struct qmp_device *mdev = platform_get_drvdata(pdev);
> +
> +	mbox_controller_unregister(&mdev->ctrl);
> +	kfree(mdev->rx_pkt.iov_base);

I don't see this being allocated in probe.

> +}
> +
> +static struct device *tmel_get_device(void)
> +{
> +	struct tmel *tdev = tmeldev;

Nope. Do not create singletons.

> +
> +	if (!tdev)
> +		return NULL;
> +
> +	return tdev->dev;
> +}
> +
> +static int tmel_prepare_msg(struct tmel *tdev, u32 msg_uid,
> +			    void *msg_buf, size_t msg_size)
> +{
> +	struct tmel_ipc_pkt *ipc_pkt = tdev->ipc_pkt;
> +	struct ipc_header *msg_hdr = &ipc_pkt->msg_hdr;
> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
> +	struct sram_payload *sram_payload = &ipc_pkt->payload.sram_payload;
> +	int ret;
> +
> +	memset(ipc_pkt, 0, sizeof(struct tmel_ipc_pkt));
> +
> +	msg_hdr->msg_type = TMEL_MSG_UID_MSG_TYPE(msg_uid);
> +	msg_hdr->action_id = TMEL_MSG_UID_ACTION_ID(msg_uid);
> +
> +	pr_debug("uid: %d, msg_size: %zu msg_type:%d, action_id:%d\n",
> +		 msg_uid, msg_size, msg_hdr->msg_type, msg_hdr->action_id);

dev_dbg, stop using pr_ everywhere

> +
> +	if (sizeof(struct ipc_header) + msg_size <= MBOX_IPC_PACKET_SIZE) {
> +		/* Mbox only */
> +		msg_hdr->ipc_type = IPC_MBOX_ONLY;
> +		msg_hdr->msg_len = msg_size;
> +		memcpy((void *)mbox_payload, msg_buf, msg_size);
> +	} else if (msg_size <= SRAM_IPC_MAX_BUF_SIZE) {
> +		/* SRAM */
> +		msg_hdr->ipc_type = IPC_MBOX_SRAM;
> +		msg_hdr->msg_len = 8;
> +
> +		tdev->sram_dma_addr = dma_map_single(tdev->dev, msg_buf,
> +						     msg_size,
> +						     DMA_BIDIRECTIONAL);
> +		ret = dma_mapping_error(tdev->dev, tdev->sram_dma_addr);
> +		if (ret != 0) {
> +			pr_err("SRAM DMA mapping error: %d\n", ret);
> +			return ret;
> +		}
> +
> +		sram_payload->payload_ptr = tdev->sram_dma_addr;
> +		sram_payload->payload_len = msg_size;
> +	} else {
> +		pr_err("Invalid payload length: %zu\n", msg_size);

No, dev_err

> +	}
> +
> +	return 0;
> +}
> +
> +static void tmel_unprepare_message(struct tmel *tdev,
> +				   void *msg_buf, size_t msg_size)
> +{
> +	struct tmel_ipc_pkt *ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
> +
> +	if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_ONLY) {
> +		memcpy(msg_buf, (void *)mbox_payload, msg_size);
> +	} else if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_SRAM) {
> +		dma_unmap_single(tdev->dev, tdev->sram_dma_addr, msg_size,
> +				 DMA_BIDIRECTIONAL);
> +		tdev->sram_dma_addr = 0;
> +	}
> +}
> +
> +static bool tmel_rx_done(struct tmel *tdev)
> +{
> +	return tdev->rx_done;
> +}
> +
> +static int tmel_process_request(u32 msg_uid, void *msg_buf,
> +				size_t msg_size)
> +{
> +	struct tmel *tdev = tmeldev;
> +	unsigned long jiffies;
> +	struct tmel_ipc_pkt *resp_ipc_pkt;
> +	long time_left = 0;
> +	int ret = 0;
> +
> +	/*
> +	 * Check to handle if probe is not successful or not completed yet
> +	 */

No, this is an impossible condition: this code cannot be called before probe completes.

Remove such defensive spaghetti-prevention code from your driver and untangle it, so the actual code flow becomes understandable.


> +	if (!tdev) {
> +		pr_err("tmel dev is NULL\n");
> +		return -ENODEV;
> +	}
> +
> +	if (!msg_buf || !msg_size) {
> +		pr_err("Invalid msg_buf or msg_size\n");

No, use dev_err. This applies everywhere.

> +		return -EINVAL;
> +	}
> +
> +	mutex_lock(&tdev->lock);
> +	tdev->rx_done = false;
> +
> +	ret = tmel_prepare_msg(tdev, msg_uid, msg_buf, msg_size);
> +	if (ret)
> +		return ret;
> +
> +	tdev->pkt.iov_len = sizeof(struct tmel_ipc_pkt);
> +	tdev->pkt.iov_base = (void *)tdev->ipc_pkt;
> +
> +	qmp_send_data(tdev->mdev, &tdev->pkt);
> +	jiffies = msecs_to_jiffies(30000);
> +
> +	time_left = wait_event_interruptible_timeout(tdev->waitq,
> +						     tmel_rx_done(tdev),
> +						     jiffies);
> +
> +	if (!time_left) {
> +		pr_err("Request timed out\n");
> +		ret = -ETIMEDOUT;
> +		goto err_exit;
> +	}
> +
> +	if (tdev->pkt.iov_len != sizeof(struct tmel_ipc_pkt)) {
> +		pr_err("Invalid pkt.size received size: %lu, expected: %zu\n",
> +		       tdev->pkt.iov_len, sizeof(struct tmel_ipc_pkt));
> +		ret = -EPROTO;
> +		goto err_exit;
> +	}
> +
> +	resp_ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
> +	tmel_unprepare_message(tdev, msg_buf, msg_size);
> +	tdev->rx_done = false;
> +	ret = resp_ipc_pkt->msg_hdr.response;
> +
> +err_exit:
> +	mutex_unlock(&tdev->lock);
> +	return ret;
> +}
> +
> +static int tmel_secboot_sec_auth(u32 sw_id, void *metadata, size_t size)
> +{
> +	struct device *dev = tmel_get_device();
> +	struct tmel_secboot_sec_auth *msg;
> +	dma_addr_t elf_buf_phys;
> +	void *elf_buf;
> +	int ret;
> +
> +	if (!dev || !metadata)
> +		return -EINVAL;
> +
> +	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
> +
> +	elf_buf = dma_alloc_coherent(dev, size, &elf_buf_phys, GFP_KERNEL);
> +	if (!elf_buf)
> +		return -ENOMEM;
> +
> +	memcpy(elf_buf, metadata, size);
> +
> +	msg->req.sw_id = sw_id;
> +	msg->req.elf_buf.buf = (u32)elf_buf_phys;
> +	msg->req.elf_buf.buf_len = (u32)size;
> +
> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SEC_AUTH, msg,
> +				   sizeof(struct tmel_secboot_sec_auth));
> +	if (ret) {
> +		pr_err("Failed to send IPC: %d\n", ret);
> +	} else if (msg->resp.status || msg->resp.extended_error) {
> +		pr_err("Failed with status: %d error: %d\n",
> +		       msg->resp.status, msg->resp.extended_error);
> +		ret = msg->resp.status;
> +	}
> +
> +	kfree(msg);
> +	dma_free_coherent(dev, size, elf_buf, elf_buf_phys);
> +
> +	return ret;
> +}
> +
> +static int tmel_secboot_teardown(u32 sw_id, u32 secondary_sw_id)
> +{
> +	struct device *dev = tmel_get_device();
> +	struct tmel_secboot_teardown msg = {0};
> +	int ret;
> +
> +	if (!dev)
> +		return -EINVAL;
> +
> +	msg.req.sw_id = sw_id;
> +	msg.req.secondary_sw_id = secondary_sw_id;
> +	msg.resp.status = TMEL_ERROR_GENERIC;
> +
> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN, &msg,
> +				   sizeof(msg));
> +	if (ret) {
> +		pr_err("Failed to send IPC: %d\n", ret);
> +	} else if (msg.resp.status) {
> +		pr_err("Failed with status: %d\n", msg.resp.status);
> +		ret = msg.resp.status;
> +	}
> +
> +	return ret;
> +}
> +
> +static int tmel_init(struct qmp_device *mdev)
> +{
> +	struct tmel *tdev;
> +
> +	tdev = devm_kzalloc(mdev->dev, sizeof(*tdev), GFP_KERNEL);
> +	if (!tdev)
> +		return -ENOMEM;
> +
> +	mutex_init(&tdev->lock);
> +
> +	tdev->ipc_pkt = devm_kzalloc(mdev->dev, sizeof(struct tmel_ipc_pkt),
> +				     GFP_KERNEL);
> +	if (!tdev->ipc_pkt)
> +		return -ENOMEM;
> +
> +	init_waitqueue_head(&tdev->waitq);
> +
> +	tdev->rx_done = false;
> +	tdev->dev = mdev->dev;
> +
> +	tmeldev = tdev;
> +	tmeldev->mdev = mdev;
> +
> +	return 0;
> +}
> +
> +static int tmel_qmp_send(struct mbox_chan *chan, void *data)
> +{
> +	struct qmp_device *mdev = chan->con_priv;
> +
> +	mdev->qwork.data =  data;
> +
> +	queue_work(system_wq, &mdev->qwork.work);
> +
> +	return 0;
> +}
> +
> +static void tmel_qmp_send_work(struct work_struct *work)
> +{
> +	struct qmp_work *qwork = container_of(work, struct qmp_work, work);
> +	struct qmp_device *mdev = tmeldev->mdev;
> +	struct mbox_chan *chan = &mdev->ctrl.chans[0];
> +
> +	struct tmel_qmp_msg *tmsg = qwork->data;
> +	struct tmel_sec_auth *smsg = tmsg->msg;
> +	int ret;
> +
> +	switch (tmsg->msg_id) {
> +	case TMEL_MSG_UID_SECBOOT_SEC_AUTH:
> +		ret = tmel_secboot_sec_auth(smsg->pas_id,
> +					    smsg->data,
> +					    smsg->size);
> +		break;
> +	case TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN:
> +		ret = tmel_secboot_teardown(smsg->pas_id, 0);
> +		break;
> +	}
> +
> +	mbox_chan_txdone(chan, 0);
> +}
> +
> +/**
> + * tmel_qmp_mbox_of_xlate() - Returns a mailbox channel to be used for this mailbox
> + *		      device. Make sure the channel is not already in use.
> + * @mbox:       Mailbox device controlls the requested channel.
> + * @spec:       Device tree arguments to specify which channel is requested.
> + */
> +static struct mbox_chan *tmel_qmp_mbox_of_xlate(struct mbox_controller *mbox,
> +						const struct of_phandle_args *spec)
> +{
> +	struct qmp_device *mdev = dev_get_drvdata(mbox->dev);
> +	unsigned int channel = spec->args[0];
> +
> +	if (!mdev)
> +		return ERR_PTR(-EPROBE_DEFER);
> +
> +	if (channel >= mbox->num_chans)
> +		return ERR_PTR(-EINVAL);
> +
> +	mutex_lock(&mdev->state_lock);
> +	if (mdev->ch_in_use) {
> +		dev_err(mdev->dev, "mbox channel already in use\n");
> +		mutex_unlock(&mdev->state_lock);
> +		return ERR_PTR(-EBUSY);


Why should calling xlate twice with the same argument be forbidden? This looks wrong.
Xlate does not mean the mailbox is actually in use.


> +	}
> +	mdev->ch_in_use = true;
> +	mutex_unlock(&mdev->state_lock);
> +
> +	return &mbox->chans[0];
> +}
> +
> +static struct mbox_chan_ops tmel_qmp_ops = {
> +	.startup = tmel_qmp_startup,
> +	.shutdown = tmel_qmp_shutdown,
> +	.send_data = tmel_qmp_send,
> +};
> +
> +static int tmel_qmp_probe(struct platform_device *pdev)
> +{
> +	struct device_node *node = pdev->dev.of_node;
> +	struct mbox_chan *chans;
> +	struct qmp_device *mdev;
> +	int ret = 0;
> +
> +	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
> +	if (!mdev)
> +		return -ENOMEM;
> +
> +	platform_set_drvdata(pdev, mdev);
> +
> +	ret = tmel_qmp_parse_devicetree(pdev, mdev);
> +	if (ret)
> +		return ret;
> +
> +	mdev->dev = &pdev->dev;
> +
> +	chans = devm_kzalloc(mdev->dev,


devm_kcalloc

> +			     sizeof(*chans) * QMP_NUM_CHANS, GFP_KERNEL);
> +	if (!chans)
> +		return -ENOMEM;
> +
> +	INIT_WORK(&mdev->qwork.work, tmel_qmp_send_work);
> +
> +	mdev->ctrl.dev = &pdev->dev;
> +	mdev->ctrl.ops = &tmel_qmp_ops;
> +	mdev->ctrl.chans = chans;
> +	chans[0].con_priv = mdev;
> +	mdev->ctrl.num_chans = QMP_NUM_CHANS;
> +	mdev->ctrl.txdone_irq = true;
> +	mdev->ctrl.of_xlate = tmel_qmp_mbox_of_xlate;
> +
> +	ret = mbox_controller_register(&mdev->ctrl);
> +	if (ret) {
> +		dev_err(mdev->dev, "failed to register mbox controller\n");
> +		return ret;
> +	}
> +
> +	spin_lock_init(&mdev->tx_lock);
> +	mutex_init(&mdev->state_lock);
> +	mdev->local_state = LINK_DISCONNECTED;
> +	init_completion(&mdev->link_complete);
> +	init_completion(&mdev->ch_complete);
> +
> +	INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout);
> +
> +	ret = platform_get_irq(pdev, 0);
> +
> +	ret = devm_request_threaded_irq(mdev->dev, ret,
> +					NULL, qmp_irq_handler,
> +					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
> +					node->name, (void *)mdev);
> +	if (ret < 0) {
> +		dev_err(mdev->dev, "request threaded irq failed, ret %d\n",
> +			ret);

dev_err_probe

> +




> +
> +static const struct of_device_id tmel_qmp_dt_match[] = {
> +	{ .compatible = "qcom,ipq5424-tmel-qmp" },
> +	{},
> +};
> +
> +static struct platform_driver tmel_qmp_driver = {
> +	.driver = {
> +		.name = "tmel_qmp_mbox",
> +		.of_match_table = tmel_qmp_dt_match,
> +	},
> +	.probe = tmel_qmp_probe,
> +	.remove = tmel_qmp_remove,
> +};
> +module_platform_driver(tmel_qmp_driver);
> +
> +MODULE_DESCRIPTION("QCOM TMEL QMP DRIVER");

"driver", this is not an acronym.

> +MODULE_LICENSE("GPL");
Best regards,
Krzysztof
Dmitry Baryshkov Dec. 31, 2024, 4:22 p.m. UTC | #3
On Tue, Dec 31, 2024 at 11:19:00AM +0530, Sricharan R wrote:
> From: Sricharan Ramabadhran <quic_srichara@quicinc.com>
> 
> This mailbox facilitates the communication between the TME-L server based
> subsystems (Q6) and the TME-L client (APPSS/BTSS/AUDIOSS), used for security
> services like secure image authentication, enable/disable efuses, crypto
> services. Each client in the   SoC has its own block of message RAM and IRQ
> for communication with the TME-L SS. The protocol used to communicate in the
> message RAM is known as Qualcomm Messaging Protocol (QMP).
> 
> Remote proc driver subscribes to this mailbox and uses the mbox_send_message
> to use TME-L to securely authenticate/teardown the images.

You seem to be doing a lot of tricks with __iomem-related data. Are you
sure your driver passes sparse checks?

> 
> Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
> ---
>   [v2] Added worker for mailbox tx processing, since some of the operations can sleep
>        Fixed checkpatch warnings. Some [CHECK] like below still exist, but that looks
>        like a false postive.
> 
>        CHECK: Macro argument 'm' may be better as '(m)' to avoid precedence issues
>         #1072: FILE: include/linux/mailbox/tmelcom-qmp.h:40:
>         +#define TMEL_MSG_UID_CREATE(m, a)      ((u32)(((m & 0xff) << 8) | (a & 0xff)))

It is not, please implement the suggestion.

> 
>  drivers/mailbox/Kconfig             |   7 +
>  drivers/mailbox/Makefile            |   2 +
>  drivers/mailbox/qcom-tmel-qmp.c     | 971 ++++++++++++++++++++++++++++
>  include/linux/mailbox/tmelcom-qmp.h | 157 +++++
>  4 files changed, 1137 insertions(+)
>  create mode 100644 drivers/mailbox/qcom-tmel-qmp.c
>  create mode 100644 include/linux/mailbox/tmelcom-qmp.h
> 
> diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
> index 8ecba7fb999e..8ad0b834d617 100644
> --- a/drivers/mailbox/Kconfig
> +++ b/drivers/mailbox/Kconfig
> @@ -306,4 +306,11 @@ config THEAD_TH1520_MBOX
>  	  kernel is running, and E902 core used for power management among other
>  	  things.
>  
> +config QCOM_TMEL_QMP_MAILBOX
> +	tristate "QCOM Mailbox Protocol(QMP) for TME-L SS"

What is TME-L (or TMEL) SS? AmSamoa? South Sudan? ß? Schutzstaffel?

> +	help
> +	  Say yes to add support for the QMP Mailbox Protocol driver for TME-L.
> +	  QMP is a lightweight communication protocol for sending messages to
> +	  TME-L. This protocol fits into the Generic Mailbox Framework.
> +	  QMP uses a mailbox registers.
>  endif
> diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
> index 5f4f5b0ce2cc..4dba283a94ad 100644
> --- a/drivers/mailbox/Makefile
> +++ b/drivers/mailbox/Makefile
> @@ -66,3 +66,5 @@ obj-$(CONFIG_QCOM_CPUCP_MBOX)	+= qcom-cpucp-mbox.o
>  obj-$(CONFIG_QCOM_IPCC)		+= qcom-ipcc.o
>  
>  obj-$(CONFIG_THEAD_TH1520_MBOX)	+= mailbox-th1520.o
> +
> +obj-$(CONFIG_QCOM_TMEL_QMP_MAILBOX) += qcom-tmel-qmp.o
> diff --git a/drivers/mailbox/qcom-tmel-qmp.c b/drivers/mailbox/qcom-tmel-qmp.c
> new file mode 100644
> index 000000000000..6de0a418e0ae
> --- /dev/null
> +++ b/drivers/mailbox/qcom-tmel-qmp.c
> @@ -0,0 +1,971 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
> + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
> + */
> +
> +#include <linux/completion.h>
> +#include <linux/delay.h>
> +#include <linux/dma-direction.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/io.h>
> +#include <linux/kernel.h>
> +#include <linux/kthread.h>
> +#include <linux/mailbox_client.h>
> +#include <linux/mailbox_controller.h>
> +#include <linux/mailbox/tmelcom-qmp.h>
> +#include <linux/module.h>
> +#include <linux/of.h>
> +#include <linux/of_irq.h>
> +#include <linux/of_platform.h>
> +#include <linux/platform_device.h>
> +#include <linux/spinlock.h>
> +#include <linux/types.h>
> +#include <linux/uaccess.h>
> +#include <linux/uio.h>
> +#include <linux/workqueue.h>
> +
> +#define QMP_NUM_CHANS	0x1
> +#define QMP_TOUT_MS	1000
> +#define MBOX_ALIGN_BYTES	3
> +#define QMP_CTRL_DATA_SIZE	4
> +#define QMP_MAX_PKT_SIZE	0x18
> +#define QMP_UCORE_DESC_OFFSET	0x1000
> +
> +#define QMP_CH_VAR_GET(mdev, desc, var) ((mdev)->desc.bits.var)
> +#define QMP_CH_VAR_SET(mdev, desc, var) (mdev)->desc.bits.var = 1
> +#define QMP_CH_VAR_CLR(mdev, desc, var) (mdev)->desc.bits.var = 0

Inline.

> +
> +#define QMP_MCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, mcore, var)
> +#define QMP_MCORE_CH_VAR_SET(mdev, var)	QMP_CH_VAR_SET(mdev, mcore, var)
> +#define QMP_MCORE_CH_VAR_CLR(mdev, var)	QMP_CH_VAR_CLR(mdev, mcore, var)

Inline. No wrappers around wrappers around wrappers.

> +
> +#define QMP_MCORE_CH_VAR_TOGGLE(mdev, var) \
> +	(mdev)->mcore.bits.var = !((mdev)->mcore.bits.var)
> +#define QMP_MCORE_CH_ACKED_CHECK(mdev, var) \
> +	((mdev)->ucore.bits.var == (mdev)->mcore.bits.var##_ack)
> +#define QMP_MCORE_CH_ACK_UPDATE(mdev, var) \
> +	(mdev)->mcore.bits.var##_ack = (mdev)->ucore.bits.var
> +#define QMP_MCORE_CH_VAR_ACK_CLR(mdev, var) \
> +	(mdev)->mcore.bits.var##_ack = 0

Ugh, no.

> +
> +#define QMP_UCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, ucore, var)
> +#define QMP_UCORE_CH_ACKED_CHECK(mdev, var) \
> +	((mdev)->mcore.bits.var == (mdev)->ucore.bits.var##_ack)
> +#define QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, var) \
> +	((mdev)->ucore.bits.var != (mdev)->mcore.bits.var##_ack)
> +
> +/**
> + * enum qmp_local_state -	definition of the local state machine
> + * @LINK_DISCONNECTED:		Init state, waiting for ucore to start

What is ucore?

> + * @LINK_NEGOTIATION:		Set local link state to up, wait for ucore ack
> + * @LINK_CONNECTED:		Link state up, channel not connected
> + * @LOCAL_CONNECTING:		Channel opening locally, wait for ucore ack
> + * @CHANNEL_CONNECTED:		Channel fully opened
> + * @LOCAL_DISCONNECTING:	Channel closing locally, wait for ucore ack

Unindent, please. At least the top line shouldn't have it.

> + */
> +enum qmp_local_state {
> +	LINK_DISCONNECTED,
> +	LINK_NEGOTIATION,
> +	LINK_CONNECTED,
> +	LOCAL_CONNECTING,
> +	CHANNEL_CONNECTED,
> +	LOCAL_DISCONNECTING,
> +};
> +
> +union channel_desc {
> +	struct {
> +		u32 link_state:1;
> +		u32 link_state_ack:1;
> +		u32 ch_state:1;
> +		u32 ch_state_ack:1;
> +		u32 tx:1;
> +		u32 tx_ack:1;
> +		u32 rx_done:1;
> +		u32 rx_done_ack:1;
> +		u32 read_int:1;
> +		u32 read_int_ack:1;
> +		u32 reserved:6;
> +		u32 frag_size:8;
> +		u32 rem_frag_count:8;
> +	} bits;
> +	unsigned int val;
> +};
> +
> +struct qmp_work {
> +	struct work_struct work;
> +	void *data;
> +};
> +
> +/**
> + * struct qmp_device - local information for managing a single mailbox
> + * @dev:	    The device that corresponds to this mailbox
> + * @ctrl:	    The mbox controller for this mailbox
> + * @mcore_desc:	    Local core (APSS) mailbox descriptor
> + * @ucore_desc:	    Remote core (TME-L) mailbox descriptor
> + * @mcore:	    Local core (APSS) channel descriptor
> + * @ucore:	    Remote core (TME-L) channel descriptor
> + * @rx_pkt:	    Buffer to pass to client, holds received data from mailbox
> + * @tx_pkt:	    Buffer from client, holds data to send on mailbox
> + * @mbox_client:    Mailbox client for the IPC interrupt
> + * @mbox_chan:	    Mailbox client chan for the IPC interrupt
> + * @local_state:    Current state of mailbox protocol
> + * @state_lock:	    Serialize mailbox state changes
> + * @tx_lock:	    Serialize access for writes to mailbox
> + * @link_complete:  Use to block until link negotiation with remote proc
> + * @ch_complete:    Use to block until the channel is fully opened
> + * @dwork:	    Delayed work to detect timed out tx
> + * @tx_sent:	    True if tx is sent and remote proc has not sent ack
> + */
> +struct qmp_device {
> +	struct device *dev;
> +	struct mbox_controller ctrl;
> +	struct qmp_work qwork;
> +
> +	void __iomem *mcore_desc;
> +	void __iomem *ucore_desc;
> +	union channel_desc mcore;
> +	union channel_desc ucore;
> +
> +	struct kvec rx_pkt;
> +	struct kvec tx_pkt;
> +
> +	struct mbox_client mbox_client;
> +	struct mbox_chan *mbox_chan;
> +
> +	enum qmp_local_state local_state;
> +
> +	/* Lock for QMP link state changes */
> +	struct mutex state_lock;
> +	/* Lock to serialize access to mailbox */
> +	spinlock_t tx_lock;
> +
> +	struct completion link_complete;
> +	struct completion ch_complete;
> +	struct delayed_work dwork;
> +	void *data;
> +
> +	bool tx_sent;
> +	bool ch_in_use;
> +};
> +
> +struct tmel_msg_param_type_buf_in {
> +	u32 buf;
> +	u32 buf_len;
> +};
> +
> +struct tmel_secboot_sec_auth_req {
> +	u32 sw_id;
> +	struct tmel_msg_param_type_buf_in elf_buf;
> +	struct tmel_msg_param_type_buf_in region_list;
> +	u32 relocate;
> +} __packed;
> +
> +struct tmel_secboot_sec_auth_resp {
> +	u32 first_seg_addr;
> +	u32 first_seg_len;
> +	u32 entry_addr;
> +	u32 extended_error;
> +	u32 status;
> +} __packed;
> +
> +struct tmel_secboot_sec_auth {
> +	struct tmel_secboot_sec_auth_req req;
> +	struct tmel_secboot_sec_auth_resp resp;
> +} __packed;
> +
> +struct tmel_secboot_teardown_req {
> +	u32 sw_id;
> +	u32 secondary_sw_id;
> +} __packed;
> +
> +struct tmel_secboot_teardown_resp {
> +	u32 status;
> +} __packed;
> +
> +struct tmel_secboot_teardown {
> +	struct tmel_secboot_teardown_req req;
> +	struct tmel_secboot_teardown_resp resp;
> +} __packed;
> +
> +struct tmel {
> +	struct device *dev;
> +	struct qmp_device *mdev;
> +	struct kvec pkt;
> +	/* To serialize incoming tmel request */
> +	struct mutex lock;
> +	struct tmel_ipc_pkt *ipc_pkt;
> +	dma_addr_t sram_dma_addr;
> +	wait_queue_head_t waitq;
> +	bool rx_done;
> +};
> +
> +static struct tmel *tmeldev;

What? Unprotected global static variable? Why do you need it at all?
Drop immediately.

> +
> +/**
> + * qmp_send_irq() - send an irq to a remote entity as an event signal.
> + * @mdev:       Which remote entity that should receive the irq.
> + */
> +static void qmp_send_irq(struct qmp_device *mdev)
> +{
> +	/* Update the mcore val to mcore register */

What is the use for such comments?

> +	iowrite32(mdev->mcore.val, mdev->mcore_desc);
> +	/* Ensure desc update is visible before IPC */
> +	wmb();
> +
> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
> +		mdev->mcore.val, mdev->ucore.val);
> +
> +	mbox_send_message(mdev->mbox_chan, NULL);
> +	mbox_client_txdone(mdev->mbox_chan, 0);
> +}
> +
> +/**
> + * qmp_notify_timeout() - Notify client of tx timeout with -ETIME
> + * @work:		  Structure for work that was scheduled.
> + */
> +static void qmp_notify_timeout(struct work_struct *work)
> +{
> +	struct delayed_work *dwork = to_delayed_work(work);
> +	struct qmp_device *mdev = container_of(dwork, struct qmp_device, dwork);
> +	struct mbox_chan *chan = &mdev->ctrl.chans[0];
> +	int err = -ETIME;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&mdev->tx_lock, flags);
> +	if (!mdev->tx_sent) {
> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
> +		return;
> +	}
> +	mdev->tx_sent = false;
> +	spin_unlock_irqrestore(&mdev->tx_lock, flags);
> +	dev_dbg(mdev->dev, "%s: TX timeout", __func__);
> +	mbox_chan_txdone(chan, err);
> +}
> +
> +static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev)
> +{
> +	schedule_delayed_work(&mdev->dwork, msecs_to_jiffies(QMP_TOUT_MS));
> +}
> +
> +/**
> + * tmel_qmp_startup() - Start qmp mailbox channel for communication. Waits for
> + *		       remote subsystem to open channel if link is not
> + *		       initated or until timeout.
> + * @chan:	       mailbox channel that is being opened.
> + *
> + * Return: 0 on succes or standard Linux error code.
> + */
> +static int tmel_qmp_startup(struct mbox_chan *chan)
> +{
> +	struct qmp_device *mdev = chan->con_priv;
> +	int ret;
> +
> +	if (!mdev)
> +		return -EINVAL;

Is it a real case or just protective coding?

> +
> +	ret = wait_for_completion_timeout(&mdev->link_complete,
> +					  msecs_to_jiffies(QMP_TOUT_MS));
> +	if (!ret)
> +		return -EAGAIN;
> +
> +	mutex_lock(&mdev->state_lock);
> +	if (mdev->local_state == LINK_CONNECTED) {
> +		QMP_MCORE_CH_VAR_SET(mdev, ch_state);
> +		mdev->local_state = LOCAL_CONNECTING;
> +		dev_dbg(mdev->dev, "link complete, local connecting");
> +		qmp_send_irq(mdev);
> +	}
> +	mutex_unlock(&mdev->state_lock);
> +
> +	ret = wait_for_completion_timeout(&mdev->ch_complete,
> +					  msecs_to_jiffies(QMP_TOUT_MS));
> +	if (!ret)
> +		return -ETIME;
> +
> +	return 0;
> +}
> +
> +/**
> + * qmp_send_data() - Copy the data to the channel's mailbox and notify
> + *		     remote subsystem of new data. This function will
> + *		     return an error if the previous message sent has
> + *		     not been read. Cannot Sleep.
> + * @chan:	mailbox channel that data is to be sent over.
> + * @data:	Data to be sent to remote processor, should be in the format of
> + *		a kvec.
> + *
> + * Return: 0 on succes or standard Linux error code.
> + */
> +static int qmp_send_data(struct qmp_device *mdev, void *data)
> +{
> +	struct kvec *pkt = (struct kvec *)data;
> +	void __iomem *addr;
> +	unsigned long flags;
> +
> +	if (!mdev || !data || !completion_done(&mdev->ch_complete))
> +		return -EINVAL;

Is it a real case or just protective coding?

> +
> +	if (pkt->iov_len > QMP_MAX_PKT_SIZE) {
> +		dev_err(mdev->dev, "Unsupported packet size %lu\n", pkt->iov_len);
> +		return -EINVAL;
> +	}
> +
> +	spin_lock_irqsave(&mdev->tx_lock, flags);
> +	if (mdev->tx_sent) {
> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
> +		return -EAGAIN;
> +	}
> +
> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
> +		mdev->mcore.val, mdev->ucore.val);
> +
> +	addr = mdev->mcore_desc + QMP_CTRL_DATA_SIZE;
> +	memcpy_toio(addr, pkt->iov_base, pkt->iov_len);
> +
> +	mdev->mcore.bits.frag_size = pkt->iov_len;
> +	mdev->mcore.bits.rem_frag_count = 0;
> +
> +	dev_dbg(mdev->dev, "Copied buffer to mbox, sz: %d",
> +		mdev->mcore.bits.frag_size);
> +
> +	mdev->tx_sent = true;
> +	QMP_MCORE_CH_VAR_TOGGLE(mdev, tx);
> +	qmp_send_irq(mdev);
> +	qmp_schedule_tx_timeout(mdev);
> +	spin_unlock_irqrestore(&mdev->tx_lock, flags);
> +
> +	return 0;
> +}
> +
> +/**
> + * tmel_qmp_shutdown() - Disconnect this mailbox channel so the client does not
> + *			 receive anymore data and can reliquish control
> + *			 of the channel.
> + * @chan:		 mailbox channel to be shutdown.
> + */
> +static void tmel_qmp_shutdown(struct mbox_chan *chan)
> +{
> +	struct qmp_device *mdev = chan->con_priv;
> +
> +	mutex_lock(&mdev->state_lock);
> +	if (mdev->local_state != LINK_DISCONNECTED) {
> +		mdev->local_state = LOCAL_DISCONNECTING;
> +		QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
> +		qmp_send_irq(mdev);
> +	}
> +	mutex_unlock(&mdev->state_lock);
> +}
> +
> +static void tmel_receive_message(void *message)
> +{
> +	struct tmel *tdev = tmeldev;
> +	struct kvec *pkt = NULL;
> +
> +	if (!message) {
> +		pr_err("spurious message received\n");

s/pr_/dev_/ all over the place.

> +		goto tmel_receive_end;
> +	}
> +
> +	if (tdev->rx_done) {
> +		pr_err("tmel response pending\n");
> +		goto tmel_receive_end;
> +	}
> +
> +	pkt = (struct kvec *)message;
> +	tdev->pkt.iov_len = pkt->iov_len;
> +	tdev->pkt.iov_base = pkt->iov_base;
> +	tdev->rx_done = true;
> +
> +tmel_receive_end:
> +	wake_up_interruptible(&tdev->waitq);
> +}
> +
> +/**
> + * qmp_recv_data() -	received notification that data is available in the
> + *			mailbox. Copy data from mailbox and pass to client.
> + * @mbox:		mailbox device that received the notification.
> + * @mbox_of:		offset of mailbox after QMP Control data.
> + */
> +static void qmp_recv_data(struct qmp_device *mdev, u32 mbox_of)
> +{
> +	void __iomem *addr;
> +	struct kvec *pkt;
> +
> +	addr = mdev->ucore_desc + mbox_of;
> +	pkt = &mdev->rx_pkt;
> +	pkt->iov_len = mdev->ucore.bits.frag_size;
> +
> +	memcpy_fromio(pkt->iov_base, addr, pkt->iov_len);
> +	QMP_MCORE_CH_ACK_UPDATE(mdev, tx);
> +	dev_dbg(mdev->dev, "%s: Send RX data to TMEL Client", __func__);
> +	tmel_receive_message(pkt);
> +
> +	QMP_MCORE_CH_VAR_TOGGLE(mdev, rx_done);
> +	qmp_send_irq(mdev);
> +}
> +
> +/**
> + * clr_mcore_ch_state() - Clear the mcore state of a mailbox.
> + * @mdev:	mailbox device to be initialized.
> + */
> +static void clr_mcore_ch_state(struct qmp_device *mdev)
> +{
> +	QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, ch_state);
> +
> +	QMP_MCORE_CH_VAR_CLR(mdev, tx);
> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, tx);
> +
> +	QMP_MCORE_CH_VAR_CLR(mdev, rx_done);
> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, rx_done);
> +
> +	QMP_MCORE_CH_VAR_CLR(mdev, read_int);
> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, read_int);
> +
> +	mdev->mcore.bits.frag_size = 0;
> +	mdev->mcore.bits.rem_frag_count = 0;
> +}
> +
> +/**
> + * qmp_rx() - Handle incoming messages from remote processor.
> + * @mbox:	mailbox device that received notification.
> + */
> +static void qmp_rx(struct qmp_device *mdev)
> +{
> +	unsigned long flags;
> +
> +	/* read remote_desc from mailbox register */
> +	mdev->ucore.val = ioread32(mdev->ucore_desc);
> +
> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
> +		mdev->mcore.val, mdev->ucore.val);
> +
> +	mutex_lock(&mdev->state_lock);
> +
> +	/* Check if remote link down */
> +	if (mdev->local_state >= LINK_CONNECTED &&
> +	    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
> +		mdev->local_state = LINK_NEGOTIATION;
> +		QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
> +		qmp_send_irq(mdev);
> +		mutex_unlock(&mdev->state_lock);
> +		return;
> +	}
> +
> +	switch (mdev->local_state) {
> +	case LINK_DISCONNECTED:
> +		QMP_MCORE_CH_VAR_SET(mdev, link_state);
> +		mdev->local_state = LINK_NEGOTIATION;
> +		mdev->rx_pkt.iov_base = kzalloc(QMP_MAX_PKT_SIZE,
> +						GFP_KERNEL);

Move to probe, use devm.

> +
> +		if (!mdev->rx_pkt.iov_base) {
> +			dev_err(mdev->dev, "rx pkt alloc failed");
> +			break;
> +		}
> +		dev_dbg(mdev->dev, "Set to link negotiation");
> +		qmp_send_irq(mdev);
> +
> +		break;
> +	case LINK_NEGOTIATION:
> +		if (!QMP_MCORE_CH_VAR_GET(mdev, link_state) ||
> +		    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
> +			dev_err(mdev->dev, "rx irq:link down state\n");
> +			break;
> +		}
> +
> +		clr_mcore_ch_state(mdev);
> +		QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
> +		mdev->local_state = LINK_CONNECTED;
> +		complete_all(&mdev->link_complete);
> +		dev_dbg(mdev->dev, "Set to link connected");
> +
> +		break;
> +	case LINK_CONNECTED:
> +		/* No need to handle until local opens */
> +		break;
> +	case LOCAL_CONNECTING:
> +		/* Ack to remote ch_state change */
> +		QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
> +
> +		mdev->local_state = CHANNEL_CONNECTED;
> +		complete_all(&mdev->ch_complete);
> +		dev_dbg(mdev->dev, "Set to channel connected");
> +		qmp_send_irq(mdev);
> +		break;
> +	case CHANNEL_CONNECTED:
> +		/* Check for remote channel down */
> +		if (!QMP_UCORE_CH_VAR_GET(mdev, ch_state)) {
> +			mdev->local_state = LOCAL_CONNECTING;
> +			QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
> +			dev_dbg(mdev->dev, "Remote Disconnect");
> +			qmp_send_irq(mdev);
> +		}
> +
> +		spin_lock_irqsave(&mdev->tx_lock, flags);
> +		/* Check TX done */
> +		if (mdev->tx_sent &&
> +		    QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, rx_done)) {
> +			/* Ack to remote */
> +			QMP_MCORE_CH_ACK_UPDATE(mdev, rx_done);
> +			mdev->tx_sent = false;
> +			cancel_delayed_work(&mdev->dwork);
> +			dev_dbg(mdev->dev, "TX flag cleared");
> +		}
> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
> +
> +		/* Check if remote is Transmitting */
> +		if (!QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, tx))
> +			break;
> +		if (mdev->ucore.bits.frag_size == 0 ||
> +		    mdev->ucore.bits.frag_size > QMP_MAX_PKT_SIZE) {
> +			dev_err(mdev->dev, "Rx frag size error %d\n",
> +				mdev->ucore.bits.frag_size);
> +			break;
> +		}
> +
> +		qmp_recv_data(mdev, QMP_CTRL_DATA_SIZE);
> +		break;
> +	case LOCAL_DISCONNECTING:
> +		if (!QMP_MCORE_CH_VAR_GET(mdev, ch_state)) {
> +			clr_mcore_ch_state(mdev);
> +			mdev->local_state = LINK_CONNECTED;
> +			dev_dbg(mdev->dev, "Channel closed");
> +			reinit_completion(&mdev->ch_complete);
> +		}
> +
> +		break;
> +	default:
> +		dev_err(mdev->dev, "Local Channel State corrupted\n");
> +	}
> +	mutex_unlock(&mdev->state_lock);
> +}
> +
> +static irqreturn_t qmp_irq_handler(int irq, void *priv)
> +{
> +	struct qmp_device *mdev = (struct qmp_device *)priv;
> +
> +	qmp_rx(mdev);
> +
> +	return IRQ_HANDLED;
> +}
> +
> +static int tmel_qmp_parse_devicetree(struct platform_device *pdev,
> +				     struct qmp_device *mdev)

inline.

> +{
> +	struct device *dev = &pdev->dev;
> +
> +	mdev->mcore_desc = devm_platform_ioremap_resource(pdev, 0);
> +	if (!mdev->mcore_desc) {

Incorrect

> +		dev_err(dev, "ioremap failed for mcore reg\n");
> +		return -EIO;
> +	}
> +
> +	mdev->ucore_desc = mdev->mcore_desc + QMP_UCORE_DESC_OFFSET;
> +
> +	mdev->mbox_client.dev = dev;
> +	mdev->mbox_client.knows_txdone = false;
> +	mdev->mbox_chan = mbox_request_channel(&mdev->mbox_client, 0);
> +	if (IS_ERR(mdev->mbox_chan)) {
> +		dev_err(dev, "mbox chan for IPC is missing\n");
> +		return PTR_ERR(mdev->mbox_chan);
> +	}
> +
> +	return 0;
> +}
> +
> +static void tmel_qmp_remove(struct platform_device *pdev)
> +{
> +	struct qmp_device *mdev = platform_get_drvdata(pdev);
> +
> +	mbox_controller_unregister(&mdev->ctrl);
> +	kfree(mdev->rx_pkt.iov_base);
> +}
> +
> +static struct device *tmel_get_device(void)
> +{
> +	struct tmel *tdev = tmeldev;
> +
> +	if (!tdev)
> +		return NULL;
> +
> +	return tdev->dev;
> +}
> +
> +static int tmel_prepare_msg(struct tmel *tdev, u32 msg_uid,
> +			    void *msg_buf, size_t msg_size)
> +{
> +	struct tmel_ipc_pkt *ipc_pkt = tdev->ipc_pkt;
> +	struct ipc_header *msg_hdr = &ipc_pkt->msg_hdr;
> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
> +	struct sram_payload *sram_payload = &ipc_pkt->payload.sram_payload;
> +	int ret;
> +
> +	memset(ipc_pkt, 0, sizeof(struct tmel_ipc_pkt));
> +
> +	msg_hdr->msg_type = TMEL_MSG_UID_MSG_TYPE(msg_uid);
> +	msg_hdr->action_id = TMEL_MSG_UID_ACTION_ID(msg_uid);
> +
> +	pr_debug("uid: %d, msg_size: %zu msg_type:%d, action_id:%d\n",
> +		 msg_uid, msg_size, msg_hdr->msg_type, msg_hdr->action_id);
> +
> +	if (sizeof(struct ipc_header) + msg_size <= MBOX_IPC_PACKET_SIZE) {
> +		/* Mbox only */
> +		msg_hdr->ipc_type = IPC_MBOX_ONLY;
> +		msg_hdr->msg_len = msg_size;
> +		memcpy((void *)mbox_payload, msg_buf, msg_size);
> +	} else if (msg_size <= SRAM_IPC_MAX_BUF_SIZE) {
> +		/* SRAM */
> +		msg_hdr->ipc_type = IPC_MBOX_SRAM;
> +		msg_hdr->msg_len = 8;
> +
> +		tdev->sram_dma_addr = dma_map_single(tdev->dev, msg_buf,
> +						     msg_size,
> +						     DMA_BIDIRECTIONAL);
> +		ret = dma_mapping_error(tdev->dev, tdev->sram_dma_addr);
> +		if (ret != 0) {
> +			pr_err("SRAM DMA mapping error: %d\n", ret);
> +			return ret;
> +		}
> +
> +		sram_payload->payload_ptr = tdev->sram_dma_addr;
> +		sram_payload->payload_len = msg_size;
> +	} else {
> +		pr_err("Invalid payload length: %zu\n", msg_size);

Return error?

> +	}
> +
> +	return 0;
> +}
> +
> +static void tmel_unprepare_message(struct tmel *tdev,
> +				   void *msg_buf, size_t msg_size)
> +{
> +	struct tmel_ipc_pkt *ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
> +
> +	if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_ONLY) {
> +		memcpy(msg_buf, (void *)mbox_payload, msg_size);
> +	} else if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_SRAM) {
> +		dma_unmap_single(tdev->dev, tdev->sram_dma_addr, msg_size,
> +				 DMA_BIDIRECTIONAL);
> +		tdev->sram_dma_addr = 0;
> +	}
> +}
> +
> +static bool tmel_rx_done(struct tmel *tdev)
> +{
> +	return tdev->rx_done;
> +}
> +
> +static int tmel_process_request(u32 msg_uid, void *msg_buf,
> +				size_t msg_size)
> +{
> +	struct tmel *tdev = tmeldev;
> +	unsigned long jiffies;
> +	struct tmel_ipc_pkt *resp_ipc_pkt;
> +	long time_left = 0;
> +	int ret = 0;
> +
> +	/*
> +	 * Check to handle if probe is not successful or not completed yet
> +	 */
> +	if (!tdev) {
> +		pr_err("tmel dev is NULL\n");
> +		return -ENODEV;
> +	}
> +
> +	if (!msg_buf || !msg_size) {
> +		pr_err("Invalid msg_buf or msg_size\n");
> +		return -EINVAL;
> +	}
> +
> +	mutex_lock(&tdev->lock);
> +	tdev->rx_done = false;
> +
> +	ret = tmel_prepare_msg(tdev, msg_uid, msg_buf, msg_size);
> +	if (ret)
> +		return ret;
> +
> +	tdev->pkt.iov_len = sizeof(struct tmel_ipc_pkt);
> +	tdev->pkt.iov_base = (void *)tdev->ipc_pkt;
> +
> +	qmp_send_data(tdev->mdev, &tdev->pkt);
> +	jiffies = msecs_to_jiffies(30000);
> +
> +	time_left = wait_event_interruptible_timeout(tdev->waitq,
> +						     tmel_rx_done(tdev),
> +						     jiffies);
> +
> +	if (!time_left) {
> +		pr_err("Request timed out\n");
> +		ret = -ETIMEDOUT;
> +		goto err_exit;
> +	}
> +
> +	if (tdev->pkt.iov_len != sizeof(struct tmel_ipc_pkt)) {
> +		pr_err("Invalid pkt.size received size: %lu, expected: %zu\n",
> +		       tdev->pkt.iov_len, sizeof(struct tmel_ipc_pkt));
> +		ret = -EPROTO;
> +		goto err_exit;
> +	}
> +
> +	resp_ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
> +	tmel_unprepare_message(tdev, msg_buf, msg_size);
> +	tdev->rx_done = false;
> +	ret = resp_ipc_pkt->msg_hdr.response;
> +
> +err_exit:
> +	mutex_unlock(&tdev->lock);
> +	return ret;
> +}
> +
> +static int tmel_secboot_sec_auth(u32 sw_id, void *metadata, size_t size)
> +{
> +	struct device *dev = tmel_get_device();
> +	struct tmel_secboot_sec_auth *msg;
> +	dma_addr_t elf_buf_phys;
> +	void *elf_buf;
> +	int ret;
> +
> +	if (!dev || !metadata)
> +		return -EINVAL;
> +
> +	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
> +
> +	elf_buf = dma_alloc_coherent(dev, size, &elf_buf_phys, GFP_KERNEL);
> +	if (!elf_buf)
> +		return -ENOMEM;
> +
> +	memcpy(elf_buf, metadata, size);
> +
> +	msg->req.sw_id = sw_id;
> +	msg->req.elf_buf.buf = (u32)elf_buf_phys;
> +	msg->req.elf_buf.buf_len = (u32)size;
> +
> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SEC_AUTH, msg,
> +				   sizeof(struct tmel_secboot_sec_auth));
> +	if (ret) {
> +		pr_err("Failed to send IPC: %d\n", ret);
> +	} else if (msg->resp.status || msg->resp.extended_error) {
> +		pr_err("Failed with status: %d error: %d\n",
> +		       msg->resp.status, msg->resp.extended_error);
> +		ret = msg->resp.status;
> +	}
> +
> +	kfree(msg);
> +	dma_free_coherent(dev, size, elf_buf, elf_buf_phys);
> +
> +	return ret;
> +}
> +
> +static int tmel_secboot_teardown(u32 sw_id, u32 secondary_sw_id)
> +{
> +	struct device *dev = tmel_get_device();
> +	struct tmel_secboot_teardown msg = {0};
> +	int ret;
> +
> +	if (!dev)
> +		return -EINVAL;
> +
> +	msg.req.sw_id = sw_id;
> +	msg.req.secondary_sw_id = secondary_sw_id;
> +	msg.resp.status = TMEL_ERROR_GENERIC;
> +
> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN, &msg,
> +				   sizeof(msg));
> +	if (ret) {
> +		pr_err("Failed to send IPC: %d\n", ret);
> +	} else if (msg.resp.status) {
> +		pr_err("Failed with status: %d\n", msg.resp.status);
> +		ret = msg.resp.status;
> +	}
> +
> +	return ret;
> +}
> +
> +static int tmel_init(struct qmp_device *mdev)
> +{
> +	struct tmel *tdev;
> +
> +	tdev = devm_kzalloc(mdev->dev, sizeof(*tdev), GFP_KERNEL);
> +	if (!tdev)
> +		return -ENOMEM;
> +
> +	mutex_init(&tdev->lock);
> +
> +	tdev->ipc_pkt = devm_kzalloc(mdev->dev, sizeof(struct tmel_ipc_pkt),
> +				     GFP_KERNEL);
> +	if (!tdev->ipc_pkt)
> +		return -ENOMEM;
> +
> +	init_waitqueue_head(&tdev->waitq);
> +
> +	tdev->rx_done = false;
> +	tdev->dev = mdev->dev;
> +
> +	tmeldev = tdev;
> +	tmeldev->mdev = mdev;
> +
> +	return 0;
> +}
> +
> +static int tmel_qmp_send(struct mbox_chan *chan, void *data)
> +{
> +	struct qmp_device *mdev = chan->con_priv;
> +
> +	mdev->qwork.data =  data;
> +
> +	queue_work(system_wq, &mdev->qwork.work);
> +
> +	return 0;
> +}
> +
> +static void tmel_qmp_send_work(struct work_struct *work)
> +{
> +	struct qmp_work *qwork = container_of(work, struct qmp_work, work);
> +	struct qmp_device *mdev = tmeldev->mdev;
> +	struct mbox_chan *chan = &mdev->ctrl.chans[0];
> +
> +	struct tmel_qmp_msg *tmsg = qwork->data;
> +	struct tmel_sec_auth *smsg = tmsg->msg;
> +	int ret;
> +
> +	switch (tmsg->msg_id) {
> +	case TMEL_MSG_UID_SECBOOT_SEC_AUTH:
> +		ret = tmel_secboot_sec_auth(smsg->pas_id,
> +					    smsg->data,
> +					    smsg->size);
> +		break;
> +	case TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN:
> +		ret = tmel_secboot_teardown(smsg->pas_id, 0);
> +		break;
> +	}
> +
> +	mbox_chan_txdone(chan, 0);
> +}
> +
> +/**
> + * tmel_qmp_mbox_of_xlate() - Returns a mailbox channel to be used for this mailbox
> + *		      device. Make sure the channel is not already in use.
> + * @mbox:       Mailbox device controlls the requested channel.
> + * @spec:       Device tree arguments to specify which channel is requested.
> + */
> +static struct mbox_chan *tmel_qmp_mbox_of_xlate(struct mbox_controller *mbox,
> +						const struct of_phandle_args *spec)
> +{
> +	struct qmp_device *mdev = dev_get_drvdata(mbox->dev);
> +	unsigned int channel = spec->args[0];
> +
> +	if (!mdev)
> +		return ERR_PTR(-EPROBE_DEFER);
> +
> +	if (channel >= mbox->num_chans)
> +		return ERR_PTR(-EINVAL);
> +
> +	mutex_lock(&mdev->state_lock);
> +	if (mdev->ch_in_use) {
> +		dev_err(mdev->dev, "mbox channel already in use\n");
> +		mutex_unlock(&mdev->state_lock);
> +		return ERR_PTR(-EBUSY);
> +	}
> +	mdev->ch_in_use = true;
> +	mutex_unlock(&mdev->state_lock);
> +
> +	return &mbox->chans[0];
> +}
> +
> +static struct mbox_chan_ops tmel_qmp_ops = {
> +	.startup = tmel_qmp_startup,
> +	.shutdown = tmel_qmp_shutdown,
> +	.send_data = tmel_qmp_send,
> +};
> +
> +static int tmel_qmp_probe(struct platform_device *pdev)
> +{
> +	struct device_node *node = pdev->dev.of_node;
> +	struct mbox_chan *chans;
> +	struct qmp_device *mdev;
> +	int ret = 0;
> +
> +	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
> +	if (!mdev)
> +		return -ENOMEM;
> +
> +	platform_set_drvdata(pdev, mdev);
> +
> +	ret = tmel_qmp_parse_devicetree(pdev, mdev);
> +	if (ret)
> +		return ret;
> +
> +	mdev->dev = &pdev->dev;
> +
> +	chans = devm_kzalloc(mdev->dev,
> +			     sizeof(*chans) * QMP_NUM_CHANS, GFP_KERNEL);
> +	if (!chans)
> +		return -ENOMEM;
> +
> +	INIT_WORK(&mdev->qwork.work, tmel_qmp_send_work);
> +
> +	mdev->ctrl.dev = &pdev->dev;
> +	mdev->ctrl.ops = &tmel_qmp_ops;
> +	mdev->ctrl.chans = chans;
> +	chans[0].con_priv = mdev;
> +	mdev->ctrl.num_chans = QMP_NUM_CHANS;
> +	mdev->ctrl.txdone_irq = true;
> +	mdev->ctrl.of_xlate = tmel_qmp_mbox_of_xlate;
> +
> +	ret = mbox_controller_register(&mdev->ctrl);

devm_

> +	if (ret) {
> +		dev_err(mdev->dev, "failed to register mbox controller\n");
> +		return ret;
> +	}
> +
> +	spin_lock_init(&mdev->tx_lock);
> +	mutex_init(&mdev->state_lock);
> +	mdev->local_state = LINK_DISCONNECTED;
> +	init_completion(&mdev->link_complete);
> +	init_completion(&mdev->ch_complete);

Oh, nice. So mbox is already there, but the structure is not
initialized. 

> +
> +	INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout);
> +
> +	ret = platform_get_irq(pdev, 0);
> +
> +	ret = devm_request_threaded_irq(mdev->dev, ret,
> +					NULL, qmp_irq_handler,
> +					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
> +					node->name, (void *)mdev);

drop type conversion.

Why is IRQ registered after mbox?

> +	if (ret < 0) {
> +		dev_err(mdev->dev, "request threaded irq failed, ret %d\n",
> +			ret);
> +
> +		tmel_qmp_remove(pdev);
> +		return ret;
> +	}
> +
> +	/* Receive any outstanding initial data */
> +	tmel_init(mdev);
> +	qmp_rx(mdev);

You've already registered a mailbox. There can already be message traffic.

> +
> +	return 0;
> +}
> +
> +static const struct of_device_id tmel_qmp_dt_match[] = {
> +	{ .compatible = "qcom,ipq5424-tmel-qmp" },
> +	{},
> +};
> +
> +static struct platform_driver tmel_qmp_driver = {
> +	.driver = {
> +		.name = "tmel_qmp_mbox",
> +		.of_match_table = tmel_qmp_dt_match,
> +	},
> +	.probe = tmel_qmp_probe,
> +	.remove = tmel_qmp_remove,
> +};
> +module_platform_driver(tmel_qmp_driver);
> +
> +MODULE_DESCRIPTION("QCOM TMEL QMP DRIVER");

driver

> +MODULE_LICENSE("GPL");
> diff --git a/include/linux/mailbox/tmelcom-qmp.h b/include/linux/mailbox/tmelcom-qmp.h
> new file mode 100644
> index 000000000000..9fa450eaf736
> --- /dev/null
> +++ b/include/linux/mailbox/tmelcom-qmp.h
> @@ -0,0 +1,157 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
> + */
> +#ifndef _TMELCOM_H_
> +#define _TMELCOM_H_
> +
> +/*----------------------------------------------------------------------------
> + * Documentation
> + * --------------------------------------------------------------------------
> + */
> +
> +/*
> + * TMEL Messages Unique Identifiers bit layout
> +    _____________________________________
> +   |	   |	    |	   |
> +   | 31------16| 15-------8 | 7-------0 |
> +   | Reserved  |messageType | actionID  |
> +   |___________|____________|___________|
> +	       \___________  ___________/
> +			   \/
> +		      TMEL_MSG_UID
> +*/

#define instead of drawing pictures. Think about people using Braille
terminals.

> +
> +/*
> + * TMEL Messages Unique Identifiers Parameter ID bit layout
> +_________________________________________________________________________________________
> +|     |     |     |     |     |     |     |     |     |     |     |    |    |    |       |
> +|31-30|29-28|27-26|25-24|23-22|21-20|19-18|17-16|15-14|13-12|11-10|9--8|7--6|5--4|3-----0|
> +| p14 | p13 | p12 | p11 | p10 | p9  | p8  | p7  | p6  | p5  | p4  | p3 | p2 | p1 | nargs |
> +|type |type |type |type |type |type |type |type |type |type |type |type|type|type|       |
> +|_____|_____|_____|_____|_____|_____|_____|_____|_____|_____|_____|____|____|____|_______|

Totally unreadable and not helping. What is nargs? What kind of types
are those?

> +
> +*/
> +
> +/*
> + * Macro used to define unique TMEL Message Identifier based on
> + * message type and action identifier.
> + */
> +#define TMEL_MSG_UID_CREATE(m, a)	((u32)(((m & 0xff) << 8) | (a & 0xff)))

What is m and a? Please use sensible names in the API.

> +
> +/** Helper macro to extract the messageType from TMEL_MSG_UID. */
> +#define TMEL_MSG_UID_MSG_TYPE(v)	((v & GENMASK(15, 8)) >> 8)

#define MASK
use FIELD_PREP, FIELD_GET

> +
> +/** Helper macro to extract the actionID from TMEL_MSG_UID. */
> +#define TMEL_MSG_UID_ACTION_ID(v)	(v & GENMASK(7, 0))
> +
> +/****************************************************************************
> + *
> + * All definitions of supported messageType's.
> + *
> + * 0x00 -> 0xF0 messageType used for production use cases.
> + * 0xF1 -> 0xFF messageType reserved(can be used for test puprposes).

Which production use cases? Reserved by whom? Who can use those?

> + *
> + * <Template> : TMEL_MSG_<MSGTYPE_NAME>
> + * **************************************************************************/
> +#define TMEL_MSG_SECBOOT		 0x00
> +
> +/****************************************************************************
> + *
> + * All definitions of action ID's per messageType.
> + *
> + * 0x00 -> 0xBF actionID used for production use cases.
> + * 0xC0 -> 0xFF messageType must be reserved for test use cases.
> + *
> + * NOTE: Test ID's shouldn't appear in this file.
> + *
> + * <Template> : TMEL_ACTION_<MSGTYPE_NAME>_<ACTIONID_NAME>
> + * **************************************************************************/
> +
> +/*
> + * ----------------------------------------------------------------------------
> +		Action ID's for TMEL_MSG_SECBOOT
> + * ------------------------------------------------------------------------
> + */
> +#define TMEL_ACTION_SECBOOT_SEC_AUTH		     0x04
> +#define TMEL_ACTION_SECBOOT_SS_TEAR_DOWN	     0x0A
> +
> +/****************************************************************************
> + *
> + * All definitions of TMEL Message UID's (messageType | actionID).
> + *
> + * <Template> : TMEL_MSG_UID_<MSGTYPE_NAME>_<ACTIONID_NAME>
> + * *************************************************************************/
> +
> +/*----------------------------------------------------------------------------
> + * UID's for TMEL_MSG_SECBOOT
> + *-------------------------------------------------------------------------
> + */
> +#define TMEL_MSG_UID_SECBOOT_SEC_AUTH	    TMEL_MSG_UID_CREATE(TMEL_MSG_SECBOOT,\
> +					    TMEL_ACTION_SECBOOT_SEC_AUTH)
> +
> +#define TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN	TMEL_MSG_UID_CREATE(TMEL_MSG_SECBOOT,\
> +						TMEL_ACTION_SECBOOT_SS_TEAR_DOWN)
> +
> +#define HW_MBOX_SIZE			32
> +#define MBOX_QMP_CTRL_DATA_SIZE		4
> +#define MBOX_RSV_SIZE			4
> +#define MBOX_IPC_PACKET_SIZE		(HW_MBOX_SIZE - MBOX_QMP_CTRL_DATA_SIZE - MBOX_RSV_SIZE)
> +#define MBOX_IPC_MAX_PARAMS		5
> +
> +#define MAX_PARAM_IN_PARAM_ID		14
> +#define PARAM_CNT_FOR_PARAM_TYPE_OUTBUF	3
> +#define SRAM_IPC_MAX_PARAMS		(MAX_PARAM_IN_PARAM_ID * PARAM_CNT_FOR_PARAM_TYPE_OUTBUF)
> +#define SRAM_IPC_MAX_BUF_SIZE		(SRAM_IPC_MAX_PARAMS * sizeof(u32))
> +
> +#define TMEL_ERROR_GENERIC		(0x1U)
> +#define TMEL_ERROR_NOT_SUPPORTED	(0x2U)
> +#define TMEL_ERROR_BAD_PARAMETER	(0x3U)
> +#define TMEL_ERROR_BAD_MESSAGE		(0x4U)
> +#define TMEL_ERROR_BAD_ADDRESS		(0x5U)
> +#define TMEL_ERROR_TMELCOM_FAILURE	(0x6U)
> +#define TMEL_ERROR_TMEL_BUSY		(0x7U)
> +
> +enum ipc_type {
> +	IPC_MBOX_ONLY,
> +	IPC_MBOX_SRAM,
> +};
> +
> +struct ipc_header {
> +	u8 ipc_type:1;
> +	u8 msg_len:7;
> +	u8 msg_type;
> +	u8 action_id;
> +	s8 response;
> +} __packed;
> +
> +struct mbox_payload {
> +	u32 param[MBOX_IPC_MAX_PARAMS];
> +};
> +
> +struct sram_payload {
> +	u32 payload_ptr;
> +	u32 payload_len;
> +};
> +
> +union ipc_payload {
> +	struct mbox_payload mbox_payload;
> +	struct sram_payload sram_payload;
> +} __packed;
> +
> +struct tmel_ipc_pkt {
> +	struct ipc_header msg_hdr;
> +	union ipc_payload payload;
> +} __packed;
> +
> +struct tmel_qmp_msg {
> +	void *msg;
> +	u32 msg_id;
> +};
> +
> +struct tmel_sec_auth {
> +	void *data;
> +	u32 size;
> +	u32 pas_id;
> +};
> +#endif  /*_TMELCOM_H_ */
> -- 
> 2.34.1
>
kernel test robot Dec. 31, 2024, 6:59 p.m. UTC | #4
Hi Sricharan,

kernel test robot noticed the following build warnings:

[auto build test WARNING on robh/for-next]
[also build test WARNING on linus/master v6.13-rc5 next-20241220]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Sricharan-R/dt-bindings-mailbox-Document-qcom-tmel-qmp/20241231-135219
base:   https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next
patch link:    https://lore.kernel.org/r/20241231054900.2144961-3-quic_srichara%40quicinc.com
patch subject: [PATCH V2 2/2] mailbox: tmelite-qmp: Introduce TMEL QMP mailbox driver
config: sh-allmodconfig (https://download.01.org/0day-ci/archive/20250101/202501010110.6sHxF8ne-lkp@intel.com/config)
compiler: sh4-linux-gcc (GCC) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250101/202501010110.6sHxF8ne-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501010110.6sHxF8ne-lkp@intel.com/

All warnings (new ones prefixed by >>):

   In file included from include/linux/device.h:15,
                    from include/linux/dma-mapping.h:5,
                    from drivers/mailbox/qcom-tmel-qmp.c:10:
   drivers/mailbox/qcom-tmel-qmp.c: In function 'qmp_send_data':
>> drivers/mailbox/qcom-tmel-qmp.c:312:36: warning: format '%lu' expects argument of type 'long unsigned int', but argument 3 has type 'size_t' {aka 'unsigned int'} [-Wformat=]
     312 |                 dev_err(mdev->dev, "Unsupported packet size %lu\n", pkt->iov_len);
         |                                    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   include/linux/dev_printk.h:110:30: note: in definition of macro 'dev_printk_index_wrap'
     110 |                 _p_func(dev, fmt, ##__VA_ARGS__);                       \
         |                              ^~~
   include/linux/dev_printk.h:154:56: note: in expansion of macro 'dev_fmt'
     154 |         dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__)
         |                                                        ^~~~~~~
   drivers/mailbox/qcom-tmel-qmp.c:312:17: note: in expansion of macro 'dev_err'
     312 |                 dev_err(mdev->dev, "Unsupported packet size %lu\n", pkt->iov_len);
         |                 ^~~~~~~
   drivers/mailbox/qcom-tmel-qmp.c:312:63: note: format string is defined here
     312 |                 dev_err(mdev->dev, "Unsupported packet size %lu\n", pkt->iov_len);
         |                                                             ~~^
         |                                                               |
         |                                                               long unsigned int
         |                                                             %u
   In file included from include/asm-generic/bug.h:22,
                    from arch/sh/include/asm/bug.h:112,
                    from include/linux/bug.h:5,
                    from include/linux/thread_info.h:13,
                    from include/asm-generic/preempt.h:5,
                    from ./arch/sh/include/generated/asm/preempt.h:1,
                    from include/linux/preempt.h:79,
                    from include/linux/spinlock.h:56,
                    from include/linux/swait.h:7,
                    from include/linux/completion.h:12,
                    from drivers/mailbox/qcom-tmel-qmp.c:7:
   drivers/mailbox/qcom-tmel-qmp.c: In function 'tmel_process_request':
   include/linux/kern_levels.h:5:25: warning: format '%lu' expects argument of type 'long unsigned int', but argument 2 has type 'size_t' {aka 'unsigned int'} [-Wformat=]
       5 | #define KERN_SOH        "\001"          /* ASCII Start Of Header */
         |                         ^~~~~~
   include/linux/printk.h:473:25: note: in definition of macro 'printk_index_wrap'
     473 |                 _p_func(_fmt, ##__VA_ARGS__);                           \
         |                         ^~~~
   include/linux/printk.h:544:9: note: in expansion of macro 'printk'
     544 |         printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
         |         ^~~~~~
   include/linux/kern_levels.h:11:25: note: in expansion of macro 'KERN_SOH'
      11 | #define KERN_ERR        KERN_SOH "3"    /* error conditions */
         |                         ^~~~~~~~
   include/linux/printk.h:544:16: note: in expansion of macro 'KERN_ERR'
     544 |         printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
         |                ^~~~~~~~
   drivers/mailbox/qcom-tmel-qmp.c:709:17: note: in expansion of macro 'pr_err'
     709 |                 pr_err("Invalid pkt.size received size: %lu, expected: %zu\n",
         |                 ^~~~~~
   drivers/mailbox/qcom-tmel-qmp.c: In function 'tmel_qmp_send_work':
>> drivers/mailbox/qcom-tmel-qmp.c:834:13: warning: variable 'ret' set but not used [-Wunused-but-set-variable]
     834 |         int ret;
         |             ^~~
--
>> drivers/mailbox/qcom-tmel-qmp.c:152: warning: Function parameter or struct member 'qwork' not described in 'qmp_device'
>> drivers/mailbox/qcom-tmel-qmp.c:152: warning: Function parameter or struct member 'data' not described in 'qmp_device'
>> drivers/mailbox/qcom-tmel-qmp.c:152: warning: Function parameter or struct member 'ch_in_use' not described in 'qmp_device'
>> drivers/mailbox/qcom-tmel-qmp.c:303: warning: Function parameter or struct member 'mdev' not described in 'qmp_send_data'
>> drivers/mailbox/qcom-tmel-qmp.c:303: warning: Excess function parameter 'chan' description in 'qmp_send_data'
>> drivers/mailbox/qcom-tmel-qmp.c:393: warning: Function parameter or struct member 'mdev' not described in 'qmp_recv_data'
>> drivers/mailbox/qcom-tmel-qmp.c:393: warning: Excess function parameter 'mbox' description in 'qmp_recv_data'
>> drivers/mailbox/qcom-tmel-qmp.c:437: warning: Function parameter or struct member 'mdev' not described in 'qmp_rx'
>> drivers/mailbox/qcom-tmel-qmp.c:437: warning: Excess function parameter 'mbox' description in 'qmp_rx'


vim +312 drivers/mailbox/qcom-tmel-qmp.c

   101	
   102	/**
   103	 * struct qmp_device - local information for managing a single mailbox
   104	 * @dev:	    The device that corresponds to this mailbox
   105	 * @ctrl:	    The mbox controller for this mailbox
   106	 * @mcore_desc:	    Local core (APSS) mailbox descriptor
   107	 * @ucore_desc:	    Remote core (TME-L) mailbox descriptor
   108	 * @mcore:	    Local core (APSS) channel descriptor
   109	 * @ucore:	    Remote core (TME-L) channel descriptor
   110	 * @rx_pkt:	    Buffer to pass to client, holds received data from mailbox
   111	 * @tx_pkt:	    Buffer from client, holds data to send on mailbox
   112	 * @mbox_client:    Mailbox client for the IPC interrupt
   113	 * @mbox_chan:	    Mailbox client chan for the IPC interrupt
   114	 * @local_state:    Current state of mailbox protocol
   115	 * @state_lock:	    Serialize mailbox state changes
   116	 * @tx_lock:	    Serialize access for writes to mailbox
   117	 * @link_complete:  Use to block until link negotiation with remote proc
   118	 * @ch_complete:    Use to block until the channel is fully opened
   119	 * @dwork:	    Delayed work to detect timed out tx
   120	 * @tx_sent:	    True if tx is sent and remote proc has not sent ack
   121	 */
   122	struct qmp_device {
   123		struct device *dev;
   124		struct mbox_controller ctrl;
   125		struct qmp_work qwork;
   126	
   127		void __iomem *mcore_desc;
   128		void __iomem *ucore_desc;
   129		union channel_desc mcore;
   130		union channel_desc ucore;
   131	
   132		struct kvec rx_pkt;
   133		struct kvec tx_pkt;
   134	
   135		struct mbox_client mbox_client;
   136		struct mbox_chan *mbox_chan;
   137	
   138		enum qmp_local_state local_state;
   139	
   140		/* Lock for QMP link state changes */
   141		struct mutex state_lock;
   142		/* Lock to serialize access to mailbox */
   143		spinlock_t tx_lock;
   144	
   145		struct completion link_complete;
   146		struct completion ch_complete;
   147		struct delayed_work dwork;
   148		void *data;
   149	
   150		bool tx_sent;
   151		bool ch_in_use;
 > 152	};
   153	
   154	struct tmel_msg_param_type_buf_in {
   155		u32 buf;
   156		u32 buf_len;
   157	};
   158	
   159	struct tmel_secboot_sec_auth_req {
   160		u32 sw_id;
   161		struct tmel_msg_param_type_buf_in elf_buf;
   162		struct tmel_msg_param_type_buf_in region_list;
   163		u32 relocate;
   164	} __packed;
   165	
   166	struct tmel_secboot_sec_auth_resp {
   167		u32 first_seg_addr;
   168		u32 first_seg_len;
   169		u32 entry_addr;
   170		u32 extended_error;
   171		u32 status;
   172	} __packed;
   173	
   174	struct tmel_secboot_sec_auth {
   175		struct tmel_secboot_sec_auth_req req;
   176		struct tmel_secboot_sec_auth_resp resp;
   177	} __packed;
   178	
   179	struct tmel_secboot_teardown_req {
   180		u32 sw_id;
   181		u32 secondary_sw_id;
   182	} __packed;
   183	
   184	struct tmel_secboot_teardown_resp {
   185		u32 status;
   186	} __packed;
   187	
   188	struct tmel_secboot_teardown {
   189		struct tmel_secboot_teardown_req req;
   190		struct tmel_secboot_teardown_resp resp;
   191	} __packed;
   192	
   193	struct tmel {
   194		struct device *dev;
   195		struct qmp_device *mdev;
   196		struct kvec pkt;
   197		/* To serialize incoming tmel request */
   198		struct mutex lock;
   199		struct tmel_ipc_pkt *ipc_pkt;
   200		dma_addr_t sram_dma_addr;
   201		wait_queue_head_t waitq;
   202		bool rx_done;
   203	};
   204	
   205	static struct tmel *tmeldev;
   206	
   207	/**
   208	 * qmp_send_irq() - send an irq to a remote entity as an event signal.
   209	 * @mdev:       Which remote entity that should receive the irq.
   210	 */
   211	static void qmp_send_irq(struct qmp_device *mdev)
   212	{
   213		/* Update the mcore val to mcore register */
   214		iowrite32(mdev->mcore.val, mdev->mcore_desc);
   215		/* Ensure desc update is visible before IPC */
   216		wmb();
   217	
   218		dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
   219			mdev->mcore.val, mdev->ucore.val);
   220	
   221		mbox_send_message(mdev->mbox_chan, NULL);
   222		mbox_client_txdone(mdev->mbox_chan, 0);
   223	}
   224	
   225	/**
   226	 * qmp_notify_timeout() - Notify client of tx timeout with -ETIME
   227	 * @work:		  Structure for work that was scheduled.
   228	 */
   229	static void qmp_notify_timeout(struct work_struct *work)
   230	{
   231		struct delayed_work *dwork = to_delayed_work(work);
   232		struct qmp_device *mdev = container_of(dwork, struct qmp_device, dwork);
   233		struct mbox_chan *chan = &mdev->ctrl.chans[0];
   234		int err = -ETIME;
   235		unsigned long flags;
   236	
   237		spin_lock_irqsave(&mdev->tx_lock, flags);
   238		if (!mdev->tx_sent) {
   239			spin_unlock_irqrestore(&mdev->tx_lock, flags);
   240			return;
   241		}
   242		mdev->tx_sent = false;
   243		spin_unlock_irqrestore(&mdev->tx_lock, flags);
   244		dev_dbg(mdev->dev, "%s: TX timeout", __func__);
   245		mbox_chan_txdone(chan, err);
   246	}
   247	
   248	static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev)
   249	{
   250		schedule_delayed_work(&mdev->dwork, msecs_to_jiffies(QMP_TOUT_MS));
   251	}
   252	
   253	/**
   254	 * tmel_qmp_startup() - Start qmp mailbox channel for communication. Waits for
   255	 *		       remote subsystem to open channel if link is not
   256	 *		       initated or until timeout.
   257	 * @chan:	       mailbox channel that is being opened.
   258	 *
   259	 * Return: 0 on succes or standard Linux error code.
   260	 */
   261	static int tmel_qmp_startup(struct mbox_chan *chan)
   262	{
   263		struct qmp_device *mdev = chan->con_priv;
   264		int ret;
   265	
   266		if (!mdev)
   267			return -EINVAL;
   268	
   269		ret = wait_for_completion_timeout(&mdev->link_complete,
   270						  msecs_to_jiffies(QMP_TOUT_MS));
   271		if (!ret)
   272			return -EAGAIN;
   273	
   274		mutex_lock(&mdev->state_lock);
   275		if (mdev->local_state == LINK_CONNECTED) {
   276			QMP_MCORE_CH_VAR_SET(mdev, ch_state);
   277			mdev->local_state = LOCAL_CONNECTING;
   278			dev_dbg(mdev->dev, "link complete, local connecting");
   279			qmp_send_irq(mdev);
   280		}
   281		mutex_unlock(&mdev->state_lock);
   282	
   283		ret = wait_for_completion_timeout(&mdev->ch_complete,
   284						  msecs_to_jiffies(QMP_TOUT_MS));
   285		if (!ret)
   286			return -ETIME;
   287	
   288		return 0;
   289	}
   290	
   291	/**
   292	 * qmp_send_data() - Copy the data to the channel's mailbox and notify
   293	 *		     remote subsystem of new data. This function will
   294	 *		     return an error if the previous message sent has
   295	 *		     not been read. Cannot Sleep.
   296	 * @chan:	mailbox channel that data is to be sent over.
   297	 * @data:	Data to be sent to remote processor, should be in the format of
   298	 *		a kvec.
   299	 *
   300	 * Return: 0 on succes or standard Linux error code.
   301	 */
   302	static int qmp_send_data(struct qmp_device *mdev, void *data)
 > 303	{
   304		struct kvec *pkt = (struct kvec *)data;
   305		void __iomem *addr;
   306		unsigned long flags;
   307	
   308		if (!mdev || !data || !completion_done(&mdev->ch_complete))
   309			return -EINVAL;
   310	
   311		if (pkt->iov_len > QMP_MAX_PKT_SIZE) {
 > 312			dev_err(mdev->dev, "Unsupported packet size %lu\n", pkt->iov_len);
   313			return -EINVAL;
   314		}
   315	
   316		spin_lock_irqsave(&mdev->tx_lock, flags);
   317		if (mdev->tx_sent) {
   318			spin_unlock_irqrestore(&mdev->tx_lock, flags);
   319			return -EAGAIN;
   320		}
   321	
   322		dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
   323			mdev->mcore.val, mdev->ucore.val);
   324	
   325		addr = mdev->mcore_desc + QMP_CTRL_DATA_SIZE;
   326		memcpy_toio(addr, pkt->iov_base, pkt->iov_len);
   327	
   328		mdev->mcore.bits.frag_size = pkt->iov_len;
   329		mdev->mcore.bits.rem_frag_count = 0;
   330	
   331		dev_dbg(mdev->dev, "Copied buffer to mbox, sz: %d",
   332			mdev->mcore.bits.frag_size);
   333	
   334		mdev->tx_sent = true;
   335		QMP_MCORE_CH_VAR_TOGGLE(mdev, tx);
   336		qmp_send_irq(mdev);
   337		qmp_schedule_tx_timeout(mdev);
   338		spin_unlock_irqrestore(&mdev->tx_lock, flags);
   339	
   340		return 0;
   341	}
   342	
   343	/**
   344	 * tmel_qmp_shutdown() - Disconnect this mailbox channel so the client does not
   345	 *			 receive anymore data and can reliquish control
   346	 *			 of the channel.
   347	 * @chan:		 mailbox channel to be shutdown.
   348	 */
   349	static void tmel_qmp_shutdown(struct mbox_chan *chan)
   350	{
   351		struct qmp_device *mdev = chan->con_priv;
   352	
   353		mutex_lock(&mdev->state_lock);
   354		if (mdev->local_state != LINK_DISCONNECTED) {
   355			mdev->local_state = LOCAL_DISCONNECTING;
   356			QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
   357			qmp_send_irq(mdev);
   358		}
   359		mutex_unlock(&mdev->state_lock);
   360	}
   361	
   362	static void tmel_receive_message(void *message)
   363	{
   364		struct tmel *tdev = tmeldev;
   365		struct kvec *pkt = NULL;
   366	
   367		if (!message) {
   368			pr_err("spurious message received\n");
   369			goto tmel_receive_end;
   370		}
   371	
   372		if (tdev->rx_done) {
   373			pr_err("tmel response pending\n");
   374			goto tmel_receive_end;
   375		}
   376	
   377		pkt = (struct kvec *)message;
   378		tdev->pkt.iov_len = pkt->iov_len;
   379		tdev->pkt.iov_base = pkt->iov_base;
   380		tdev->rx_done = true;
   381	
   382	tmel_receive_end:
   383		wake_up_interruptible(&tdev->waitq);
   384	}
   385	
   386	/**
   387	 * qmp_recv_data() -	received notification that data is available in the
   388	 *			mailbox. Copy data from mailbox and pass to client.
   389	 * @mbox:		mailbox device that received the notification.
   390	 * @mbox_of:		offset of mailbox after QMP Control data.
   391	 */
   392	static void qmp_recv_data(struct qmp_device *mdev, u32 mbox_of)
 > 393	{
   394		void __iomem *addr;
   395		struct kvec *pkt;
   396	
   397		addr = mdev->ucore_desc + mbox_of;
   398		pkt = &mdev->rx_pkt;
   399		pkt->iov_len = mdev->ucore.bits.frag_size;
   400	
   401		memcpy_fromio(pkt->iov_base, addr, pkt->iov_len);
   402		QMP_MCORE_CH_ACK_UPDATE(mdev, tx);
   403		dev_dbg(mdev->dev, "%s: Send RX data to TMEL Client", __func__);
   404		tmel_receive_message(pkt);
   405	
   406		QMP_MCORE_CH_VAR_TOGGLE(mdev, rx_done);
   407		qmp_send_irq(mdev);
   408	}
   409	
   410	/**
   411	 * clr_mcore_ch_state() - Clear the mcore state of a mailbox.
   412	 * @mdev:	mailbox device to be initialized.
   413	 */
   414	static void clr_mcore_ch_state(struct qmp_device *mdev)
   415	{
   416		QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
   417		QMP_MCORE_CH_VAR_ACK_CLR(mdev, ch_state);
   418	
   419		QMP_MCORE_CH_VAR_CLR(mdev, tx);
   420		QMP_MCORE_CH_VAR_ACK_CLR(mdev, tx);
   421	
   422		QMP_MCORE_CH_VAR_CLR(mdev, rx_done);
   423		QMP_MCORE_CH_VAR_ACK_CLR(mdev, rx_done);
   424	
   425		QMP_MCORE_CH_VAR_CLR(mdev, read_int);
   426		QMP_MCORE_CH_VAR_ACK_CLR(mdev, read_int);
   427	
   428		mdev->mcore.bits.frag_size = 0;
   429		mdev->mcore.bits.rem_frag_count = 0;
   430	}
   431	
   432	/**
   433	 * qmp_rx() - Handle incoming messages from remote processor.
   434	 * @mbox:	mailbox device that received notification.
   435	 */
   436	static void qmp_rx(struct qmp_device *mdev)
 > 437	{
   438		unsigned long flags;
   439	
   440		/* read remote_desc from mailbox register */
   441		mdev->ucore.val = ioread32(mdev->ucore_desc);
   442	
   443		dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
   444			mdev->mcore.val, mdev->ucore.val);
   445	
   446		mutex_lock(&mdev->state_lock);
   447	
   448		/* Check if remote link down */
   449		if (mdev->local_state >= LINK_CONNECTED &&
   450		    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
   451			mdev->local_state = LINK_NEGOTIATION;
   452			QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
   453			qmp_send_irq(mdev);
   454			mutex_unlock(&mdev->state_lock);
   455			return;
   456		}
   457	
   458		switch (mdev->local_state) {
   459		case LINK_DISCONNECTED:
   460			QMP_MCORE_CH_VAR_SET(mdev, link_state);
   461			mdev->local_state = LINK_NEGOTIATION;
   462			mdev->rx_pkt.iov_base = kzalloc(QMP_MAX_PKT_SIZE,
   463							GFP_KERNEL);
   464	
   465			if (!mdev->rx_pkt.iov_base) {
   466				dev_err(mdev->dev, "rx pkt alloc failed");
   467				break;
   468			}
   469			dev_dbg(mdev->dev, "Set to link negotiation");
   470			qmp_send_irq(mdev);
   471	
   472			break;
   473		case LINK_NEGOTIATION:
   474			if (!QMP_MCORE_CH_VAR_GET(mdev, link_state) ||
   475			    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
   476				dev_err(mdev->dev, "rx irq:link down state\n");
   477				break;
   478			}
   479	
   480			clr_mcore_ch_state(mdev);
   481			QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
   482			mdev->local_state = LINK_CONNECTED;
   483			complete_all(&mdev->link_complete);
   484			dev_dbg(mdev->dev, "Set to link connected");
   485	
   486			break;
   487		case LINK_CONNECTED:
   488			/* No need to handle until local opens */
   489			break;
   490		case LOCAL_CONNECTING:
   491			/* Ack to remote ch_state change */
   492			QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
   493	
   494			mdev->local_state = CHANNEL_CONNECTED;
   495			complete_all(&mdev->ch_complete);
   496			dev_dbg(mdev->dev, "Set to channel connected");
   497			qmp_send_irq(mdev);
   498			break;
   499		case CHANNEL_CONNECTED:
   500			/* Check for remote channel down */
   501			if (!QMP_UCORE_CH_VAR_GET(mdev, ch_state)) {
   502				mdev->local_state = LOCAL_CONNECTING;
   503				QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
   504				dev_dbg(mdev->dev, "Remote Disconnect");
   505				qmp_send_irq(mdev);
   506			}
   507	
   508			spin_lock_irqsave(&mdev->tx_lock, flags);
   509			/* Check TX done */
   510			if (mdev->tx_sent &&
   511			    QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, rx_done)) {
   512				/* Ack to remote */
   513				QMP_MCORE_CH_ACK_UPDATE(mdev, rx_done);
   514				mdev->tx_sent = false;
   515				cancel_delayed_work(&mdev->dwork);
   516				dev_dbg(mdev->dev, "TX flag cleared");
   517			}
   518			spin_unlock_irqrestore(&mdev->tx_lock, flags);
   519	
   520			/* Check if remote is Transmitting */
   521			if (!QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, tx))
   522				break;
   523			if (mdev->ucore.bits.frag_size == 0 ||
   524			    mdev->ucore.bits.frag_size > QMP_MAX_PKT_SIZE) {
   525				dev_err(mdev->dev, "Rx frag size error %d\n",
   526					mdev->ucore.bits.frag_size);
   527				break;
   528			}
   529	
   530			qmp_recv_data(mdev, QMP_CTRL_DATA_SIZE);
   531			break;
   532		case LOCAL_DISCONNECTING:
   533			if (!QMP_MCORE_CH_VAR_GET(mdev, ch_state)) {
   534				clr_mcore_ch_state(mdev);
   535				mdev->local_state = LINK_CONNECTED;
   536				dev_dbg(mdev->dev, "Channel closed");
   537				reinit_completion(&mdev->ch_complete);
   538			}
   539	
   540			break;
   541		default:
   542			dev_err(mdev->dev, "Local Channel State corrupted\n");
   543		}
   544		mutex_unlock(&mdev->state_lock);
   545	}
   546
kernel test robot Jan. 1, 2025, 9:34 a.m. UTC | #5
Hi Sricharan,

kernel test robot noticed the following build warnings:

[auto build test WARNING on robh/for-next]
[also build test WARNING on linus/master v6.13-rc5 next-20241220]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Sricharan-R/dt-bindings-mailbox-Document-qcom-tmel-qmp/20241231-135219
base:   https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next
patch link:    https://lore.kernel.org/r/20241231054900.2144961-3-quic_srichara%40quicinc.com
patch subject: [PATCH V2 2/2] mailbox: tmelite-qmp: Introduce TMEL QMP mailbox driver
config: i386-randconfig-005-20250101 (https://download.01.org/0day-ci/archive/20250101/202501011724.6gr0JxBf-lkp@intel.com/config)
compiler: clang version 19.1.3 (https://github.com/llvm/llvm-project ab51eccf88f5321e7c60591c5546b254b6afab99)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250101/202501011724.6gr0JxBf-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501011724.6gr0JxBf-lkp@intel.com/

All warnings (new ones prefixed by >>):

   In file included from drivers/mailbox/qcom-tmel-qmp.c:10:
   In file included from include/linux/dma-mapping.h:8:
   In file included from include/linux/scatterlist.h:8:
   In file included from include/linux/mm.h:2223:
   include/linux/vmstat.h:518:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
     518 |         return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
         |                               ~~~~~~~~~~~ ^ ~~~
>> drivers/mailbox/qcom-tmel-qmp.c:312:55: warning: format specifies type 'unsigned long' but the argument has type 'size_t' (aka 'unsigned int') [-Wformat]
     312 |                 dev_err(mdev->dev, "Unsupported packet size %lu\n", pkt->iov_len);
         |                                                             ~~~     ^~~~~~~~~~~~
         |                                                             %zu
   include/linux/dev_printk.h:154:65: note: expanded from macro 'dev_err'
     154 |         dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__)
         |                                                                ~~~     ^~~~~~~~~~~
   include/linux/dev_printk.h:110:23: note: expanded from macro 'dev_printk_index_wrap'
     110 |                 _p_func(dev, fmt, ##__VA_ARGS__);                       \
         |                              ~~~    ^~~~~~~~~~~
   drivers/mailbox/qcom-tmel-qmp.c:710:10: warning: format specifies type 'unsigned long' but the argument has type 'size_t' (aka 'unsigned int') [-Wformat]
     709 |                 pr_err("Invalid pkt.size received size: %lu, expected: %zu\n",
         |                                                         ~~~
         |                                                         %zu
     710 |                        tdev->pkt.iov_len, sizeof(struct tmel_ipc_pkt));
         |                        ^~~~~~~~~~~~~~~~~
   include/linux/printk.h:544:33: note: expanded from macro 'pr_err'
     544 |         printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
         |                                ~~~     ^~~~~~~~~~~
   include/linux/printk.h:501:60: note: expanded from macro 'printk'
     501 | #define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__)
         |                                                     ~~~    ^~~~~~~~~~~
   include/linux/printk.h:473:19: note: expanded from macro 'printk_index_wrap'
     473 |                 _p_func(_fmt, ##__VA_ARGS__);                           \
         |                         ~~~~    ^~~~~~~~~~~
   drivers/mailbox/qcom-tmel-qmp.c:834:6: warning: variable 'ret' set but not used [-Wunused-but-set-variable]
     834 |         int ret;
         |             ^
   4 warnings generated.


vim +312 drivers/mailbox/qcom-tmel-qmp.c

   290	
   291	/**
   292	 * qmp_send_data() - Copy the data to the channel's mailbox and notify
   293	 *		     remote subsystem of new data. This function will
   294	 *		     return an error if the previous message sent has
   295	 *		     not been read. Cannot Sleep.
   296	 * @chan:	mailbox channel that data is to be sent over.
   297	 * @data:	Data to be sent to remote processor, should be in the format of
   298	 *		a kvec.
   299	 *
   300	 * Return: 0 on succes or standard Linux error code.
   301	 */
   302	static int qmp_send_data(struct qmp_device *mdev, void *data)
   303	{
   304		struct kvec *pkt = (struct kvec *)data;
   305		void __iomem *addr;
   306		unsigned long flags;
   307	
   308		if (!mdev || !data || !completion_done(&mdev->ch_complete))
   309			return -EINVAL;
   310	
   311		if (pkt->iov_len > QMP_MAX_PKT_SIZE) {
 > 312			dev_err(mdev->dev, "Unsupported packet size %lu\n", pkt->iov_len);
   313			return -EINVAL;
   314		}
   315	
   316		spin_lock_irqsave(&mdev->tx_lock, flags);
   317		if (mdev->tx_sent) {
   318			spin_unlock_irqrestore(&mdev->tx_lock, flags);
   319			return -EAGAIN;
   320		}
   321	
   322		dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
   323			mdev->mcore.val, mdev->ucore.val);
   324	
   325		addr = mdev->mcore_desc + QMP_CTRL_DATA_SIZE;
   326		memcpy_toio(addr, pkt->iov_base, pkt->iov_len);
   327	
   328		mdev->mcore.bits.frag_size = pkt->iov_len;
   329		mdev->mcore.bits.rem_frag_count = 0;
   330	
   331		dev_dbg(mdev->dev, "Copied buffer to mbox, sz: %d",
   332			mdev->mcore.bits.frag_size);
   333	
   334		mdev->tx_sent = true;
   335		QMP_MCORE_CH_VAR_TOGGLE(mdev, tx);
   336		qmp_send_irq(mdev);
   337		qmp_schedule_tx_timeout(mdev);
   338		spin_unlock_irqrestore(&mdev->tx_lock, flags);
   339	
   340		return 0;
   341	}
   342
Sricharan Ramabadhran Jan. 3, 2025, 12:09 p.m. UTC | #6
On 12/31/2024 11:51 AM, Varadarajan Narayanan wrote:
> On Tue, Dec 31, 2024 at 11:19:00AM +0530, Sricharan R wrote:
>> From: Sricharan Ramabadhran <quic_srichara@quicinc.com>
>>
>> This mailbox facilitates the communication between the TME-L server based
>> subsystems (Q6) and the TME-L client (APPSS/BTSS/AUDIOSS), used for security
>> services like secure image authentication, enable/disable efuses, crypto
>> services. Each client in the   SoC has its own block of message RAM and IRQ
> 
> Extra space before 'SoC'.
ok, will fix.

> 
>> for communication with the TME-L SS. The protocol used to communicate in the
>> message RAM is known as Qualcomm Messaging Protocol (QMP).
>>
>> Remote proc driver subscribes to this mailbox and uses the mbox_send_message
>> to use TME-L to securely authenticate/teardown the images.
>>
>> Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
>> ---
>>    [v2] Added worker for mailbox tx processing, since some of the operations can sleep
>>         Fixed checkpatch warnings. Some [CHECK] like below still exist, but that looks
>>         like a false postive.
>>
>>         CHECK: Macro argument 'm' may be better as '(m)' to avoid precedence issues
>>          #1072: FILE: include/linux/mailbox/tmelcom-qmp.h:40:
>>          +#define TMEL_MSG_UID_CREATE(m, a)      ((u32)(((m & 0xff) << 8) | (a & 0xff)))
>>
>>   drivers/mailbox/Kconfig             |   7 +
>>   drivers/mailbox/Makefile            |   2 +
>>   drivers/mailbox/qcom-tmel-qmp.c     | 971 ++++++++++++++++++++++++++++
>>   include/linux/mailbox/tmelcom-qmp.h | 157 +++++
>>   4 files changed, 1137 insertions(+)
>>   create mode 100644 drivers/mailbox/qcom-tmel-qmp.c
>>   create mode 100644 include/linux/mailbox/tmelcom-qmp.h
>>
>> diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
>> index 8ecba7fb999e..8ad0b834d617 100644
>> --- a/drivers/mailbox/Kconfig
>> +++ b/drivers/mailbox/Kconfig
>> @@ -306,4 +306,11 @@ config THEAD_TH1520_MBOX
>>   	  kernel is running, and E902 core used for power management among other
>>   	  things.
>>
>> +config QCOM_TMEL_QMP_MAILBOX
>> +	tristate "QCOM Mailbox Protocol(QMP) for TME-L SS"
> 
> Please add the usual checks to QCOM_TMEL_QMP_MAILBOX to avoid randomconfig bot errors.
> 
ok.

>> +	help
>> +	  Say yes to add support for the QMP Mailbox Protocol driver for TME-L.
>> +	  QMP is a lightweight communication protocol for sending messages to
>> +	  TME-L. This protocol fits into the Generic Mailbox Framework.
>> +	  QMP uses a mailbox registers.
>>   endif
>> diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
>> index 5f4f5b0ce2cc..4dba283a94ad 100644
>> --- a/drivers/mailbox/Makefile
>> +++ b/drivers/mailbox/Makefile
>> @@ -66,3 +66,5 @@ obj-$(CONFIG_QCOM_CPUCP_MBOX)	+= qcom-cpucp-mbox.o
>>   obj-$(CONFIG_QCOM_IPCC)		+= qcom-ipcc.o
>>
>>   obj-$(CONFIG_THEAD_TH1520_MBOX)	+= mailbox-th1520.o
>> +
>> +obj-$(CONFIG_QCOM_TMEL_QMP_MAILBOX) += qcom-tmel-qmp.o
>> diff --git a/drivers/mailbox/qcom-tmel-qmp.c b/drivers/mailbox/qcom-tmel-qmp.c
>> new file mode 100644
>> index 000000000000..6de0a418e0ae
>> --- /dev/null
>> +++ b/drivers/mailbox/qcom-tmel-qmp.c
>> @@ -0,0 +1,971 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/*
>> + * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
>> + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
>> + */
> 
> Copyright year should be updated if you post next version.
> 
ok.

>> +
>> +#include <linux/completion.h>
>> +#include <linux/delay.h>
>> +#include <linux/dma-direction.h>
>> +#include <linux/dma-mapping.h>
>> +#include <linux/init.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/io.h>
>> +#include <linux/kernel.h>
>> +#include <linux/kthread.h>
>> +#include <linux/mailbox_client.h>
>> +#include <linux/mailbox_controller.h>
>> +#include <linux/mailbox/tmelcom-qmp.h>
>> +#include <linux/module.h>
>> +#include <linux/of.h>
>> +#include <linux/of_irq.h>
>> +#include <linux/of_platform.h>
>> +#include <linux/platform_device.h>
>> +#include <linux/spinlock.h>
>> +#include <linux/types.h>
>> +#include <linux/uaccess.h>
>> +#include <linux/uio.h>
>> +#include <linux/workqueue.h>
>> +
>> +#define QMP_NUM_CHANS	0x1
>> +#define QMP_TOUT_MS	1000
>> +#define MBOX_ALIGN_BYTES	3
>> +#define QMP_CTRL_DATA_SIZE	4
>> +#define QMP_MAX_PKT_SIZE	0x18
>> +#define QMP_UCORE_DESC_OFFSET	0x1000
>> +
>> +#define QMP_CH_VAR_GET(mdev, desc, var) ((mdev)->desc.bits.var)
>> +#define QMP_CH_VAR_SET(mdev, desc, var) (mdev)->desc.bits.var = 1
>> +#define QMP_CH_VAR_CLR(mdev, desc, var) (mdev)->desc.bits.var = 0
>> +
>> +#define QMP_MCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, mcore, var)
>> +#define QMP_MCORE_CH_VAR_SET(mdev, var)	QMP_CH_VAR_SET(mdev, mcore, var)
>> +#define QMP_MCORE_CH_VAR_CLR(mdev, var)	QMP_CH_VAR_CLR(mdev, mcore, var)
>> +
>> +#define QMP_MCORE_CH_VAR_TOGGLE(mdev, var) \
>> +	(mdev)->mcore.bits.var = !((mdev)->mcore.bits.var)
>> +#define QMP_MCORE_CH_ACKED_CHECK(mdev, var) \
>> +	((mdev)->ucore.bits.var == (mdev)->mcore.bits.var##_ack)
>> +#define QMP_MCORE_CH_ACK_UPDATE(mdev, var) \
>> +	(mdev)->mcore.bits.var##_ack = (mdev)->ucore.bits.var
>> +#define QMP_MCORE_CH_VAR_ACK_CLR(mdev, var) \
>> +	(mdev)->mcore.bits.var##_ack = 0
>> +
>> +#define QMP_UCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, ucore, var)
>> +#define QMP_UCORE_CH_ACKED_CHECK(mdev, var) \
>> +	((mdev)->mcore.bits.var == (mdev)->ucore.bits.var##_ack)
>> +#define QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, var) \
>> +	((mdev)->ucore.bits.var != (mdev)->mcore.bits.var##_ack)
>> +
>> +/**
>> + * enum qmp_local_state -	definition of the local state machine
>> + * @LINK_DISCONNECTED:		Init state, waiting for ucore to start
>> + * @LINK_NEGOTIATION:		Set local link state to up, wait for ucore ack
>> + * @LINK_CONNECTED:		Link state up, channel not connected
>> + * @LOCAL_CONNECTING:		Channel opening locally, wait for ucore ack
>> + * @CHANNEL_CONNECTED:		Channel fully opened
>> + * @LOCAL_DISCONNECTING:	Channel closing locally, wait for ucore ack
>> + */
>> +enum qmp_local_state {
>> +	LINK_DISCONNECTED,
>> +	LINK_NEGOTIATION,
>> +	LINK_CONNECTED,
>> +	LOCAL_CONNECTING,
>> +	CHANNEL_CONNECTED,
>> +	LOCAL_DISCONNECTING,
>> +};
>> +
>> +union channel_desc {
>> +	struct {
>> +		u32 link_state:1;
>> +		u32 link_state_ack:1;
>> +		u32 ch_state:1;
>> +		u32 ch_state_ack:1;
>> +		u32 tx:1;
>> +		u32 tx_ack:1;
>> +		u32 rx_done:1;
>> +		u32 rx_done_ack:1;
>> +		u32 read_int:1;
>> +		u32 read_int_ack:1;
>> +		u32 reserved:6;
>> +		u32 frag_size:8;
>> +		u32 rem_frag_count:8;
>> +	} bits;
>> +	unsigned int val;
>> +};
>> +
>> +struct qmp_work {
>> +	struct work_struct work;
>> +	void *data;
>> +};
>> +
>> +/**
>> + * struct qmp_device - local information for managing a single mailbox
>> + * @dev:	    The device that corresponds to this mailbox
>> + * @ctrl:	    The mbox controller for this mailbox
>> + * @mcore_desc:	    Local core (APSS) mailbox descriptor
>> + * @ucore_desc:	    Remote core (TME-L) mailbox descriptor
>> + * @mcore:	    Local core (APSS) channel descriptor
>> + * @ucore:	    Remote core (TME-L) channel descriptor
>> + * @rx_pkt:	    Buffer to pass to client, holds received data from mailbox
>> + * @tx_pkt:	    Buffer from client, holds data to send on mailbox
>> + * @mbox_client:    Mailbox client for the IPC interrupt
>> + * @mbox_chan:	    Mailbox client chan for the IPC interrupt
>> + * @local_state:    Current state of mailbox protocol
>> + * @state_lock:	    Serialize mailbox state changes
>> + * @tx_lock:	    Serialize access for writes to mailbox
>> + * @link_complete:  Use to block until link negotiation with remote proc
>> + * @ch_complete:    Use to block until the channel is fully opened
>> + * @dwork:	    Delayed work to detect timed out tx
>> + * @tx_sent:	    True if tx is sent and remote proc has not sent ack
>> + */
>> +struct qmp_device {
>> +	struct device *dev;
>> +	struct mbox_controller ctrl;
>> +	struct qmp_work qwork;
>> +
>> +	void __iomem *mcore_desc;
>> +	void __iomem *ucore_desc;
>> +	union channel_desc mcore;
>> +	union channel_desc ucore;
>> +
>> +	struct kvec rx_pkt;
>> +	struct kvec tx_pkt;
>> +
>> +	struct mbox_client mbox_client;
>> +	struct mbox_chan *mbox_chan;
>> +
>> +	enum qmp_local_state local_state;
>> +
>> +	/* Lock for QMP link state changes */
>> +	struct mutex state_lock;
>> +	/* Lock to serialize access to mailbox */
>> +	spinlock_t tx_lock;
>> +
>> +	struct completion link_complete;
>> +	struct completion ch_complete;
>> +	struct delayed_work dwork;
>> +	void *data;
>> +
>> +	bool tx_sent;
>> +	bool ch_in_use;
>> +};
>> +
>> +struct tmel_msg_param_type_buf_in {
>> +	u32 buf;
>> +	u32 buf_len;
>> +};
>> +
>> +struct tmel_secboot_sec_auth_req {
>> +	u32 sw_id;
>> +	struct tmel_msg_param_type_buf_in elf_buf;
>> +	struct tmel_msg_param_type_buf_in region_list;
>> +	u32 relocate;
>> +} __packed;
>> +
>> +struct tmel_secboot_sec_auth_resp {
>> +	u32 first_seg_addr;
>> +	u32 first_seg_len;
>> +	u32 entry_addr;
>> +	u32 extended_error;
>> +	u32 status;
>> +} __packed;
>> +
>> +struct tmel_secboot_sec_auth {
>> +	struct tmel_secboot_sec_auth_req req;
>> +	struct tmel_secboot_sec_auth_resp resp;
>> +} __packed;
>> +
>> +struct tmel_secboot_teardown_req {
>> +	u32 sw_id;
>> +	u32 secondary_sw_id;
>> +} __packed;
>> +
>> +struct tmel_secboot_teardown_resp {
>> +	u32 status;
>> +} __packed;
>> +
>> +struct tmel_secboot_teardown {
>> +	struct tmel_secboot_teardown_req req;
>> +	struct tmel_secboot_teardown_resp resp;
>> +} __packed;
>> +
>> +struct tmel {
>> +	struct device *dev;
>> +	struct qmp_device *mdev;
>> +	struct kvec pkt;
>> +	/* To serialize incoming tmel request */
>> +	struct mutex lock;
>> +	struct tmel_ipc_pkt *ipc_pkt;
>> +	dma_addr_t sram_dma_addr;
>> +	wait_queue_head_t waitq;
>> +	bool rx_done;
>> +};
> 
> Typically it is expected that structs/unions/enums/functions have
> a similar prefix for the entire driver. Here multiple prefixes
> like qmp_local_state, channel_desc, qmp_work, tmel_xxx are used
> please see if it is possible to change.
> 
There are 2 parts to this driver, one for the tmel protocol and the
other for the qmp protocol. Hence kept different prefixes accordingly.
That said, will fix all places to use only one of these 2 prefixes.

>> +static struct tmel *tmeldev;
> 
> Can this be avoided?
> 
yes, will remove the global in next version.

>> +/**
>> + * qmp_send_irq() - send an irq to a remote entity as an event signal.
>> + * @mdev:       Which remote entity that should receive the irq.
>> + */
>> +static void qmp_send_irq(struct qmp_device *mdev)
>> +{
>> +	/* Update the mcore val to mcore register */
> 
> Remove ^^^^
> 
ok.

>> +	iowrite32(mdev->mcore.val, mdev->mcore_desc);
>> +	/* Ensure desc update is visible before IPC */
>> +	wmb();
>> +
>> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
>> +		mdev->mcore.val, mdev->ucore.val);
>> +
>> +	mbox_send_message(mdev->mbox_chan, NULL);
>> +	mbox_client_txdone(mdev->mbox_chan, 0);
>> +}
>> +
>> +/**
>> + * qmp_notify_timeout() - Notify client of tx timeout with -ETIME
>> + * @work:		  Structure for work that was scheduled.
>> + */
>> +static void qmp_notify_timeout(struct work_struct *work)
>> +{
>> +	struct delayed_work *dwork = to_delayed_work(work);
>> +	struct qmp_device *mdev = container_of(dwork, struct qmp_device, dwork);
>> +	struct mbox_chan *chan = &mdev->ctrl.chans[0];
>> +	int err = -ETIME;
> 
> 'int err' can be removed and -ETIME can be used directly in
> mbox_chan_txdone
> 
ok.

>> +	unsigned long flags;
> 
> This and other functions reverse xmas tree for variable
> declaration.
> 
ok.

>> +
>> +	spin_lock_irqsave(&mdev->tx_lock, flags);
>> +	if (!mdev->tx_sent) {
>> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
>> +		return;
>> +	}
>> +	mdev->tx_sent = false;
>> +	spin_unlock_irqrestore(&mdev->tx_lock, flags);
>> +	dev_dbg(mdev->dev, "%s: TX timeout", __func__);
>> +	mbox_chan_txdone(chan, err);
>> +}
>> +
>> +static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev)
>> +{
>> +	schedule_delayed_work(&mdev->dwork, msecs_to_jiffies(QMP_TOUT_MS));
>> +}
>> +
>> +/**
>> + * tmel_qmp_startup() - Start qmp mailbox channel for communication. Waits for
>> + *		       remote subsystem to open channel if link is not
>> + *		       initated or until timeout.
>> + * @chan:	       mailbox channel that is being opened.
>> + *
>> + * Return: 0 on succes or standard Linux error code.
> 
> success
> 
ok.

>> + */
>> +static int tmel_qmp_startup(struct mbox_chan *chan)
>> +{
>> +	struct qmp_device *mdev = chan->con_priv;
>> +	int ret;
>> +
>> +	if (!mdev)
>> +		return -EINVAL;
>> +
>> +	ret = wait_for_completion_timeout(&mdev->link_complete,
>> +					  msecs_to_jiffies(QMP_TOUT_MS));
>> +	if (!ret)
>> +		return -EAGAIN;
>> +
>> +	mutex_lock(&mdev->state_lock);
>> +	if (mdev->local_state == LINK_CONNECTED) {
>> +		QMP_MCORE_CH_VAR_SET(mdev, ch_state);
>> +		mdev->local_state = LOCAL_CONNECTING;
>> +		dev_dbg(mdev->dev, "link complete, local connecting");
>> +		qmp_send_irq(mdev);
>> +	}
>> +	mutex_unlock(&mdev->state_lock);
>> +
>> +	ret = wait_for_completion_timeout(&mdev->ch_complete,
>> +					  msecs_to_jiffies(QMP_TOUT_MS));
>> +	if (!ret)
>> +		return -ETIME;
>> +
>> +	return 0;
>> +}
>> +
>> +/**
>> + * qmp_send_data() - Copy the data to the channel's mailbox and notify
>> + *		     remote subsystem of new data. This function will
>> + *		     return an error if the previous message sent has
>> + *		     not been read. Cannot Sleep.
>> + * @chan:	mailbox channel that data is to be sent over.
>> + * @data:	Data to be sent to remote processor, should be in the format of
>> + *		a kvec.
>> + *
>> + * Return: 0 on succes or standard Linux error code.
> 
> success
> 
ok.

>> + */
>> +static int qmp_send_data(struct qmp_device *mdev, void *data)
>> +{
>> +	struct kvec *pkt = (struct kvec *)data;
>> +	void __iomem *addr;
>> +	unsigned long flags;
>> +
>> +	if (!mdev || !data || !completion_done(&mdev->ch_complete))
>> +		return -EINVAL;
>> +
>> +	if (pkt->iov_len > QMP_MAX_PKT_SIZE) {
>> +		dev_err(mdev->dev, "Unsupported packet size %lu\n", pkt->iov_len);
>> +		return -EINVAL;
>> +	}
>> +
>> +	spin_lock_irqsave(&mdev->tx_lock, flags);
>> +	if (mdev->tx_sent) {
>> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
>> +		return -EAGAIN;
>> +	}
>> +
>> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
>> +		mdev->mcore.val, mdev->ucore.val);
>> +
>> +	addr = mdev->mcore_desc + QMP_CTRL_DATA_SIZE;
>> +	memcpy_toio(addr, pkt->iov_base, pkt->iov_len);
>> +
>> +	mdev->mcore.bits.frag_size = pkt->iov_len;
>> +	mdev->mcore.bits.rem_frag_count = 0;
>> +
>> +	dev_dbg(mdev->dev, "Copied buffer to mbox, sz: %d",
>> +		mdev->mcore.bits.frag_size);
>> +
>> +	mdev->tx_sent = true;
>> +	QMP_MCORE_CH_VAR_TOGGLE(mdev, tx);
>> +	qmp_send_irq(mdev);
> 
> In all places qmp_send_irq is invoked under mutex of
> "state_lock". But in qmp_send_data alone it seems to be invoked
> under spin_lock of "tx_lock". While qmp_send_data itself is
> called under mutex of tdev->lock. Hope that is not a potential
> race.
> 
ok, the whole of the locking needs some fixing. In fact, with a few
more changes, there is no need to have the 2 mutexes. Will remove them
and just have the spinlock for updates to the m/u core registers and
state machine tracking.

>> +	qmp_schedule_tx_timeout(mdev);
>> +	spin_unlock_irqrestore(&mdev->tx_lock, flags);
>> +
>> +	return 0;
>> +}
>> +
>> +/**
>> + * tmel_qmp_shutdown() - Disconnect this mailbox channel so the client does not
>> + *			 receive anymore data and can reliquish control
>> + *			 of the channel.
>> + * @chan:		 mailbox channel to be shutdown.
>> + */
>> +static void tmel_qmp_shutdown(struct mbox_chan *chan)
>> +{
>> +	struct qmp_device *mdev = chan->con_priv;
>> +
>> +	mutex_lock(&mdev->state_lock);
>> +	if (mdev->local_state != LINK_DISCONNECTED) {
>> +		mdev->local_state = LOCAL_DISCONNECTING;
>> +		QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
>> +		qmp_send_irq(mdev);
>> +	}
>> +	mutex_unlock(&mdev->state_lock);
>> +}
>> +
>> +static void tmel_receive_message(void *message)
>> +{
>> +	struct tmel *tdev = tmeldev;
>> +	struct kvec *pkt = NULL;
>> +
>> +	if (!message) {
>> +		pr_err("spurious message received\n");
>> +		goto tmel_receive_end;
>> +	}
>> +
>> +	if (tdev->rx_done) {
>> +		pr_err("tmel response pending\n");
>> +		goto tmel_receive_end;
>> +	}
>> +
>> +	pkt = (struct kvec *)message;
>> +	tdev->pkt.iov_len = pkt->iov_len;
>> +	tdev->pkt.iov_base = pkt->iov_base;
>> +	tdev->rx_done = true;
>> +
>> +tmel_receive_end:
>> +	wake_up_interruptible(&tdev->waitq);
>> +}
>> +
>> +/**
>> + * qmp_recv_data() -	received notification that data is available in the
>> + *			mailbox. Copy data from mailbox and pass to client.
>> + * @mbox:		mailbox device that received the notification.
>> + * @mbox_of:		offset of mailbox after QMP Control data.
>> + */
>> +static void qmp_recv_data(struct qmp_device *mdev, u32 mbox_of)
>> +{
>> +	void __iomem *addr;
>> +	struct kvec *pkt;
>> +
>> +	addr = mdev->ucore_desc + mbox_of;
>> +	pkt = &mdev->rx_pkt;
>> +	pkt->iov_len = mdev->ucore.bits.frag_size;
>> +
>> +	memcpy_fromio(pkt->iov_base, addr, pkt->iov_len);
>> +	QMP_MCORE_CH_ACK_UPDATE(mdev, tx);
>> +	dev_dbg(mdev->dev, "%s: Send RX data to TMEL Client", __func__);
>> +	tmel_receive_message(pkt);
>> +
>> +	QMP_MCORE_CH_VAR_TOGGLE(mdev, rx_done);
>> +	qmp_send_irq(mdev);
>> +}
>> +
>> +/**
>> + * clr_mcore_ch_state() - Clear the mcore state of a mailbox.
>> + * @mdev:	mailbox device to be initialized.
>> + */
>> +static void clr_mcore_ch_state(struct qmp_device *mdev)
>> +{
>> +	QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
>> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, ch_state);
>> +
>> +	QMP_MCORE_CH_VAR_CLR(mdev, tx);
>> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, tx);
>> +
>> +	QMP_MCORE_CH_VAR_CLR(mdev, rx_done);
>> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, rx_done);
>> +
>> +	QMP_MCORE_CH_VAR_CLR(mdev, read_int);
>> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, read_int);
>> +
>> +	mdev->mcore.bits.frag_size = 0;
>> +	mdev->mcore.bits.rem_frag_count = 0;
>> +}
>> +
>> +/**
>> + * qmp_rx() - Handle incoming messages from remote processor.
>> + * @mbox:	mailbox device that received notification.
>> + */
>> +static void qmp_rx(struct qmp_device *mdev)
>> +{
>> +	unsigned long flags;
>> +
>> +	/* read remote_desc from mailbox register */
>> +	mdev->ucore.val = ioread32(mdev->ucore_desc);
>> +
>> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
>> +		mdev->mcore.val, mdev->ucore.val);
>> +
>> +	mutex_lock(&mdev->state_lock);
>> +
>> +	/* Check if remote link down */
>> +	if (mdev->local_state >= LINK_CONNECTED &&
>> +	    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
>> +		mdev->local_state = LINK_NEGOTIATION;
>> +		QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
>> +		qmp_send_irq(mdev);
>> +		mutex_unlock(&mdev->state_lock);
>> +		return;
>> +	}
>> +
>> +	switch (mdev->local_state) {
>> +	case LINK_DISCONNECTED:
>> +		QMP_MCORE_CH_VAR_SET(mdev, link_state);
>> +		mdev->local_state = LINK_NEGOTIATION;
>> +		mdev->rx_pkt.iov_base = kzalloc(QMP_MAX_PKT_SIZE,
>> +						GFP_KERNEL);
>> +
>> +		if (!mdev->rx_pkt.iov_base) {
>> +			dev_err(mdev->dev, "rx pkt alloc failed");
>> +			break;
>> +		}
>> +		dev_dbg(mdev->dev, "Set to link negotiation");
>> +		qmp_send_irq(mdev);
>> +
>> +		break;
>> +	case LINK_NEGOTIATION:
>> +		if (!QMP_MCORE_CH_VAR_GET(mdev, link_state) ||
>> +		    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
>> +			dev_err(mdev->dev, "rx irq:link down state\n");
>> +			break;
>> +		}
>> +
>> +		clr_mcore_ch_state(mdev);
>> +		QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
>> +		mdev->local_state = LINK_CONNECTED;
>> +		complete_all(&mdev->link_complete);
>> +		dev_dbg(mdev->dev, "Set to link connected");
>> +
>> +		break;
>> +	case LINK_CONNECTED:
>> +		/* No need to handle until local opens */
>> +		break;
>> +	case LOCAL_CONNECTING:
>> +		/* Ack to remote ch_state change */
>> +		QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
>> +
>> +		mdev->local_state = CHANNEL_CONNECTED;
>> +		complete_all(&mdev->ch_complete);
>> +		dev_dbg(mdev->dev, "Set to channel connected");
>> +		qmp_send_irq(mdev);
>> +		break;
>> +	case CHANNEL_CONNECTED:
>> +		/* Check for remote channel down */
>> +		if (!QMP_UCORE_CH_VAR_GET(mdev, ch_state)) {
>> +			mdev->local_state = LOCAL_CONNECTING;
>> +			QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
>> +			dev_dbg(mdev->dev, "Remote Disconnect");
>> +			qmp_send_irq(mdev);
>> +		}
>> +
>> +		spin_lock_irqsave(&mdev->tx_lock, flags);
>> +		/* Check TX done */
>> +		if (mdev->tx_sent &&
>> +		    QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, rx_done)) {
>> +			/* Ack to remote */
>> +			QMP_MCORE_CH_ACK_UPDATE(mdev, rx_done);
>> +			mdev->tx_sent = false;
>> +			cancel_delayed_work(&mdev->dwork);
>> +			dev_dbg(mdev->dev, "TX flag cleared");
>> +		}
>> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
>> +
>> +		/* Check if remote is Transmitting */
>> +		if (!QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, tx))
>> +			break;
>> +		if (mdev->ucore.bits.frag_size == 0 ||
>> +		    mdev->ucore.bits.frag_size > QMP_MAX_PKT_SIZE) {
>> +			dev_err(mdev->dev, "Rx frag size error %d\n",
>> +				mdev->ucore.bits.frag_size);
>> +			break;
>> +		}
>> +
>> +		qmp_recv_data(mdev, QMP_CTRL_DATA_SIZE);
>> +		break;
>> +	case LOCAL_DISCONNECTING:
>> +		if (!QMP_MCORE_CH_VAR_GET(mdev, ch_state)) {
>> +			clr_mcore_ch_state(mdev);
>> +			mdev->local_state = LINK_CONNECTED;
>> +			dev_dbg(mdev->dev, "Channel closed");
>> +			reinit_completion(&mdev->ch_complete);
>> +		}
>> +
>> +		break;
>> +	default:
>> +		dev_err(mdev->dev, "Local Channel State corrupted\n");
>> +	}
>> +	mutex_unlock(&mdev->state_lock);
>> +}
>> +
>> +static irqreturn_t qmp_irq_handler(int irq, void *priv)
>> +{
>> +	struct qmp_device *mdev = (struct qmp_device *)priv;
>> +
>> +	qmp_rx(mdev);
>> +
>> +	return IRQ_HANDLED;
>> +}
>> +
>> +static int tmel_qmp_parse_devicetree(struct platform_device *pdev,
>> +				     struct qmp_device *mdev)
>> +{
>> +	struct device *dev = &pdev->dev;
>> +
>> +	mdev->mcore_desc = devm_platform_ioremap_resource(pdev, 0);
>> +	if (!mdev->mcore_desc) {
>> +		dev_err(dev, "ioremap failed for mcore reg\n");
>> +		return -EIO;
>> +	}
>> +
>> +	mdev->ucore_desc = mdev->mcore_desc + QMP_UCORE_DESC_OFFSET;
>> +
>> +	mdev->mbox_client.dev = dev;
>> +	mdev->mbox_client.knows_txdone = false;
>> +	mdev->mbox_chan = mbox_request_channel(&mdev->mbox_client, 0);
>> +	if (IS_ERR(mdev->mbox_chan)) {
>> +		dev_err(dev, "mbox chan for IPC is missing\n");
>> +		return PTR_ERR(mdev->mbox_chan);
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +static void tmel_qmp_remove(struct platform_device *pdev)
>> +{
>> +	struct qmp_device *mdev = platform_get_drvdata(pdev);
>> +
>> +	mbox_controller_unregister(&mdev->ctrl);
>> +	kfree(mdev->rx_pkt.iov_base);
>> +}
>> +
>> +static struct device *tmel_get_device(void)
>> +{
>> +	struct tmel *tdev = tmeldev;
>> +
>> +	if (!tdev)
>> +		return NULL;
>> +
>> +	return tdev->dev;
>> +}
>> +
>> +static int tmel_prepare_msg(struct tmel *tdev, u32 msg_uid,
>> +			    void *msg_buf, size_t msg_size)
>> +{
>> +	struct tmel_ipc_pkt *ipc_pkt = tdev->ipc_pkt;
>> +	struct ipc_header *msg_hdr = &ipc_pkt->msg_hdr;
>> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
>> +	struct sram_payload *sram_payload = &ipc_pkt->payload.sram_payload;
>> +	int ret;
>> +
>> +	memset(ipc_pkt, 0, sizeof(struct tmel_ipc_pkt));
>> +
>> +	msg_hdr->msg_type = TMEL_MSG_UID_MSG_TYPE(msg_uid);
>> +	msg_hdr->action_id = TMEL_MSG_UID_ACTION_ID(msg_uid);
>> +
>> +	pr_debug("uid: %d, msg_size: %zu msg_type:%d, action_id:%d\n",
>> +		 msg_uid, msg_size, msg_hdr->msg_type, msg_hdr->action_id);
>> +
>> +	if (sizeof(struct ipc_header) + msg_size <= MBOX_IPC_PACKET_SIZE) {
>> +		/* Mbox only */
>> +		msg_hdr->ipc_type = IPC_MBOX_ONLY;
>> +		msg_hdr->msg_len = msg_size;
>> +		memcpy((void *)mbox_payload, msg_buf, msg_size);
>> +	} else if (msg_size <= SRAM_IPC_MAX_BUF_SIZE) {
>> +		/* SRAM */
>> +		msg_hdr->ipc_type = IPC_MBOX_SRAM;
>> +		msg_hdr->msg_len = 8;
>> +
>> +		tdev->sram_dma_addr = dma_map_single(tdev->dev, msg_buf,
>> +						     msg_size,
>> +						     DMA_BIDIRECTIONAL);
>> +		ret = dma_mapping_error(tdev->dev, tdev->sram_dma_addr);
>> +		if (ret != 0) {
> 
> if (ret)
ok.

> 
>> +			pr_err("SRAM DMA mapping error: %d\n", ret);
>> +			return ret;
>> +		}
>> +
>> +		sram_payload->payload_ptr = tdev->sram_dma_addr;
>> +		sram_payload->payload_len = msg_size;
>> +	} else {
>> +		pr_err("Invalid payload length: %zu\n", msg_size);
> 
> return error??
> 
ok.

>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +static void tmel_unprepare_message(struct tmel *tdev,
>> +				   void *msg_buf, size_t msg_size)
>> +{
>> +	struct tmel_ipc_pkt *ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
>> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
>> +
>> +	if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_ONLY) {
>> +		memcpy(msg_buf, (void *)mbox_payload, msg_size);
>> +	} else if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_SRAM) {
>> +		dma_unmap_single(tdev->dev, tdev->sram_dma_addr, msg_size,
>> +				 DMA_BIDIRECTIONAL);
>> +		tdev->sram_dma_addr = 0;
>> +	}
>> +}
>> +
>> +static bool tmel_rx_done(struct tmel *tdev)
>> +{
>> +	return tdev->rx_done;
>> +}
>> +
>> +static int tmel_process_request(u32 msg_uid, void *msg_buf,
>> +				size_t msg_size)
>> +{
>> +	struct tmel *tdev = tmeldev;
>> +	unsigned long jiffies;
>> +	struct tmel_ipc_pkt *resp_ipc_pkt;
>> +	long time_left = 0;
>> +	int ret = 0;
>> +
>> +	/*
>> +	 * Check to handle if probe is not successful or not completed yet
>> +	 */
>> +	if (!tdev) {
>> +		pr_err("tmel dev is NULL\n");
>> +		return -ENODEV;
>> +	}
>> +
>> +	if (!msg_buf || !msg_size) {
>> +		pr_err("Invalid msg_buf or msg_size\n");
>> +		return -EINVAL;
>> +	}
>> +
>> +	mutex_lock(&tdev->lock);
>> +	tdev->rx_done = false;
>> +
>> +	ret = tmel_prepare_msg(tdev, msg_uid, msg_buf, msg_size);
>> +	if (ret)
> 
> mutex_unlock?
> 
ok, will be removed with updated locking in next version.

>> +		return ret;
>> +
>> +	tdev->pkt.iov_len = sizeof(struct tmel_ipc_pkt);
>> +	tdev->pkt.iov_base = (void *)tdev->ipc_pkt;
>> +
>> +	qmp_send_data(tdev->mdev, &tdev->pkt);
>> +	jiffies = msecs_to_jiffies(30000);
> 
> #define for 30000
> 
ok.

>> +
>> +	time_left = wait_event_interruptible_timeout(tdev->waitq,
>> +						     tmel_rx_done(tdev),
>> +						     jiffies);
>> +
>> +	if (!time_left) {
>> +		pr_err("Request timed out\n");
>> +		ret = -ETIMEDOUT;
>> +		goto err_exit;
>> +	}
>> +
>> +	if (tdev->pkt.iov_len != sizeof(struct tmel_ipc_pkt)) {
>> +		pr_err("Invalid pkt.size received size: %lu, expected: %zu\n",
>> +		       tdev->pkt.iov_len, sizeof(struct tmel_ipc_pkt));
>> +		ret = -EPROTO;
>> +		goto err_exit;
>> +	}
>> +
>> +	resp_ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
>> +	tmel_unprepare_message(tdev, msg_buf, msg_size);
>> +	tdev->rx_done = false;
>> +	ret = resp_ipc_pkt->msg_hdr.response;
>> +
>> +err_exit:
>> +	mutex_unlock(&tdev->lock);
>> +	return ret;
>> +}
>> +
>> +static int tmel_secboot_sec_auth(u32 sw_id, void *metadata, size_t size)
>> +{
>> +	struct device *dev = tmel_get_device();
>> +	struct tmel_secboot_sec_auth *msg;
>> +	dma_addr_t elf_buf_phys;
>> +	void *elf_buf;
>> +	int ret;
>> +
>> +	if (!dev || !metadata)
>> +		return -EINVAL;
>> +
>> +	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
>> +
>> +	elf_buf = dma_alloc_coherent(dev, size, &elf_buf_phys, GFP_KERNEL);
>> +	if (!elf_buf)
> 
> kfree(msg)
> 
ok.

>> +		return -ENOMEM;
>> +
>> +	memcpy(elf_buf, metadata, size);
>> +
>> +	msg->req.sw_id = sw_id;
>> +	msg->req.elf_buf.buf = (u32)elf_buf_phys;
>> +	msg->req.elf_buf.buf_len = (u32)size;
>> +
>> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SEC_AUTH, msg,
>> +				   sizeof(struct tmel_secboot_sec_auth));
>> +	if (ret) {
>> +		pr_err("Failed to send IPC: %d\n", ret);
>> +	} else if (msg->resp.status || msg->resp.extended_error) {
>> +		pr_err("Failed with status: %d error: %d\n",
>> +		       msg->resp.status, msg->resp.extended_error);
>> +		ret = msg->resp.status;
> 
> If resp.status == 0 and resp.extended_error != 0, then the
> function will return 0 (i.e. success). Is that correct?
> 
ok, will fix to propagate both errors.

>> +	}
>> +
>> +	kfree(msg);
>> +	dma_free_coherent(dev, size, elf_buf, elf_buf_phys);
>> +
>> +	return ret;
>> +}
>> +
>> +static int tmel_secboot_teardown(u32 sw_id, u32 secondary_sw_id)
>> +{
>> +	struct device *dev = tmel_get_device();
>> +	struct tmel_secboot_teardown msg = {0};
>> +	int ret;
>> +
>> +	if (!dev)
>> +		return -EINVAL;
>> +
>> +	msg.req.sw_id = sw_id;
>> +	msg.req.secondary_sw_id = secondary_sw_id;
>> +	msg.resp.status = TMEL_ERROR_GENERIC;
>> +
>> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN, &msg,
>> +				   sizeof(msg));
>> +	if (ret) {
>> +		pr_err("Failed to send IPC: %d\n", ret);
>> +	} else if (msg.resp.status) {
>> +		pr_err("Failed with status: %d\n", msg.resp.status);
>> +		ret = msg.resp.status;
>> +	}
>> +
>> +	return ret;
>> +}
>> +
>> +static int tmel_init(struct qmp_device *mdev)
>> +{
>> +	struct tmel *tdev;
>> +
>> +	tdev = devm_kzalloc(mdev->dev, sizeof(*tdev), GFP_KERNEL);
>> +	if (!tdev)
>> +		return -ENOMEM;
>> +
>> +	mutex_init(&tdev->lock);
>> +
>> +	tdev->ipc_pkt = devm_kzalloc(mdev->dev, sizeof(struct tmel_ipc_pkt),
>> +				     GFP_KERNEL);
>> +	if (!tdev->ipc_pkt)
>> +		return -ENOMEM;
>> +
>> +	init_waitqueue_head(&tdev->waitq);
>> +
>> +	tdev->rx_done = false;
>> +	tdev->dev = mdev->dev;
>> +
>> +	tmeldev = tdev;
>> +	tmeldev->mdev = mdev;
>> +
>> +	return 0;
>> +}
>> +
>> +static int tmel_qmp_send(struct mbox_chan *chan, void *data)
>> +{
>> +	struct qmp_device *mdev = chan->con_priv;
>> +
>> +	mdev->qwork.data =  data;
>> +
>> +	queue_work(system_wq, &mdev->qwork.work);
>> +
>> +	return 0;
>> +}
>> +
>> +static void tmel_qmp_send_work(struct work_struct *work)
>> +{
>> +	struct qmp_work *qwork = container_of(work, struct qmp_work, work);
>> +	struct qmp_device *mdev = tmeldev->mdev;
>> +	struct mbox_chan *chan = &mdev->ctrl.chans[0];
>> +
>> +	struct tmel_qmp_msg *tmsg = qwork->data;
>> +	struct tmel_sec_auth *smsg = tmsg->msg;
>> +	int ret;
> 
> 'ret' is unused. Can be removed?
> 
ok.

>> +
>> +	switch (tmsg->msg_id) {
>> +	case TMEL_MSG_UID_SECBOOT_SEC_AUTH:
>> +		ret = tmel_secboot_sec_auth(smsg->pas_id,
>> +					    smsg->data,
>> +					    smsg->size);
>> +		break;
>> +	case TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN:
>> +		ret = tmel_secboot_teardown(smsg->pas_id, 0);
>> +		break;
>> +	}
>> +
>> +	mbox_chan_txdone(chan, 0);
>> +}
>> +
>> +/**
>> + * tmel_qmp_mbox_of_xlate() - Returns a mailbox channel to be used for this mailbox
>> + *		      device. Make sure the channel is not already in use.
>> + * @mbox:       Mailbox device controlls the requested channel.
>> + * @spec:       Device tree arguments to specify which channel is requested.
>> + */
>> +static struct mbox_chan *tmel_qmp_mbox_of_xlate(struct mbox_controller *mbox,
>> +						const struct of_phandle_args *spec)
>> +{
>> +	struct qmp_device *mdev = dev_get_drvdata(mbox->dev);
>> +	unsigned int channel = spec->args[0];
>> +
>> +	if (!mdev)
>> +		return ERR_PTR(-EPROBE_DEFER);
>> +
>> +	if (channel >= mbox->num_chans)
>> +		return ERR_PTR(-EINVAL);
>> +
>> +	mutex_lock(&mdev->state_lock);
>> +	if (mdev->ch_in_use) {
>> +		dev_err(mdev->dev, "mbox channel already in use\n");
>> +		mutex_unlock(&mdev->state_lock);
>> +		return ERR_PTR(-EBUSY);
>> +	}
>> +	mdev->ch_in_use = true;
>> +	mutex_unlock(&mdev->state_lock);
>> +
>> +	return &mbox->chans[0];
>> +}
>> +
>> +static struct mbox_chan_ops tmel_qmp_ops = {
>> +	.startup = tmel_qmp_startup,
>> +	.shutdown = tmel_qmp_shutdown,
>> +	.send_data = tmel_qmp_send,
>> +};
>> +
>> +static int tmel_qmp_probe(struct platform_device *pdev)
>> +{
>> +	struct device_node *node = pdev->dev.of_node;
>> +	struct mbox_chan *chans;
>> +	struct qmp_device *mdev;
>> +	int ret = 0;
>> +
>> +	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
>> +	if (!mdev)
>> +		return -ENOMEM;
>> +
>> +	platform_set_drvdata(pdev, mdev);
>> +
>> +	ret = tmel_qmp_parse_devicetree(pdev, mdev);
>> +	if (ret)
>> +		return ret;
>> +
>> +	mdev->dev = &pdev->dev;
>> +
>> +	chans = devm_kzalloc(mdev->dev,
>> +			     sizeof(*chans) * QMP_NUM_CHANS, GFP_KERNEL);
>> +	if (!chans)
>> +		return -ENOMEM;
>> +
>> +	INIT_WORK(&mdev->qwork.work, tmel_qmp_send_work);
>> +
>> +	mdev->ctrl.dev = &pdev->dev;
>> +	mdev->ctrl.ops = &tmel_qmp_ops;
>> +	mdev->ctrl.chans = chans;
>> +	chans[0].con_priv = mdev;
>> +	mdev->ctrl.num_chans = QMP_NUM_CHANS;
>> +	mdev->ctrl.txdone_irq = true;
>> +	mdev->ctrl.of_xlate = tmel_qmp_mbox_of_xlate;
>> +
>> +	ret = mbox_controller_register(&mdev->ctrl);
>> +	if (ret) {
>> +		dev_err(mdev->dev, "failed to register mbox controller\n");
>> +		return ret;
>> +	}
>> +
>> +	spin_lock_init(&mdev->tx_lock);
>> +	mutex_init(&mdev->state_lock);
>> +	mdev->local_state = LINK_DISCONNECTED;
>> +	init_completion(&mdev->link_complete);
>> +	init_completion(&mdev->ch_complete);
>> +
>> +	INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout);
>> +
>> +	ret = platform_get_irq(pdev, 0);
>> +
>> +	ret = devm_request_threaded_irq(mdev->dev, ret,
>> +					NULL, qmp_irq_handler,
>> +					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
>> +					node->name, (void *)mdev);
>> +	if (ret < 0) {
>> +		dev_err(mdev->dev, "request threaded irq failed, ret %d\n",
>> +			ret);
>> +
>> +		tmel_qmp_remove(pdev);
>> +		return ret;
>> +	}
>> +
>> +	/* Receive any outstanding initial data */
>> +	tmel_init(mdev);
>> +	qmp_rx(mdev);
>> +
>> +	return 0;
>> +}
>> +
>> +static const struct of_device_id tmel_qmp_dt_match[] = {
>> +	{ .compatible = "qcom,ipq5424-tmel-qmp" },
>> +	{},
>> +};
>> +
>> +static struct platform_driver tmel_qmp_driver = {
>> +	.driver = {
>> +		.name = "tmel_qmp_mbox",
>> +		.of_match_table = tmel_qmp_dt_match,
>> +	},
>> +	.probe = tmel_qmp_probe,
>> +	.remove = tmel_qmp_remove,
>> +};
>> +module_platform_driver(tmel_qmp_driver);
>> +
>> +MODULE_DESCRIPTION("QCOM TMEL QMP DRIVER");
>> +MODULE_LICENSE("GPL");
>> diff --git a/include/linux/mailbox/tmelcom-qmp.h b/include/linux/mailbox/tmelcom-qmp.h
>> new file mode 100644
>> index 000000000000..9fa450eaf736
>> --- /dev/null
>> +++ b/include/linux/mailbox/tmelcom-qmp.h
>> @@ -0,0 +1,157 @@
>> +/* SPDX-License-Identifier: GPL-2.0-only */
>> +/*
>> + * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
> 
> Might have to update year.
> 
ok.

>> + */
>> +#ifndef _TMELCOM_H_
>> +#define _TMELCOM_H_
>> +
>> +/*----------------------------------------------------------------------------
>> + * Documentation
>> + * --------------------------------------------------------------------------
>> + */
>> +
>> +/*
>> + * TMEL Messages Unique Identifiers bit layout
>> +    _____________________________________
>> +   |	   |	    |	   |
> 
> Alignment.
> 
ok.

>> +   | 31------16| 15-------8 | 7-------0 |
>> +   | Reserved  |messageType | actionID  |
>> +   |___________|____________|___________|
>> +	       \___________  ___________/
>> +			   \/
>> +		      TMEL_MSG_UID
>> +*/
>> +
>> +/*
>> + * TMEL Messages Unique Identifiers Parameter ID bit layout
>> +_________________________________________________________________________________________
>> +|     |     |     |     |     |     |     |     |     |     |     |    |    |    |       |
>> +|31-30|29-28|27-26|25-24|23-22|21-20|19-18|17-16|15-14|13-12|11-10|9--8|7--6|5--4|3-----0|
>> +| p14 | p13 | p12 | p11 | p10 | p9  | p8  | p7  | p6  | p5  | p4  | p3 | p2 | p1 | nargs |
>> +|type |type |type |type |type |type |type |type |type |type |type |type|type|type|       |
>> +|_____|_____|_____|_____|_____|_____|_____|_____|_____|_____|_____|____|____|____|_______|
>> +
>> +*/
>> +
>> +/*
>> + * Macro used to define unique TMEL Message Identifier based on
>> + * message type and action identifier.
>> + */
>> +#define TMEL_MSG_UID_CREATE(m, a)	((u32)(((m & 0xff) << 8) | (a & 0xff)))
>> +
>> +/** Helper macro to extract the messageType from TMEL_MSG_UID. */
>> +#define TMEL_MSG_UID_MSG_TYPE(v)	((v & GENMASK(15, 8)) >> 8)
>> +
>> +/** Helper macro to extract the actionID from TMEL_MSG_UID. */
>> +#define TMEL_MSG_UID_ACTION_ID(v)	(v & GENMASK(7, 0))
>> +
>> +/****************************************************************************
>> + *
>> + * All definitions of supported messageType's.
> 
> No apostrophe.
> 
ok.

>> + *
>> + * 0x00 -> 0xF0 messageType used for production use cases.
>> + * 0xF1 -> 0xFF messageType reserved(can be used for test puprposes).
>> + *
>> + * <Template> : TMEL_MSG_<MSGTYPE_NAME>
>> + * **************************************************************************/
>> +#define TMEL_MSG_SECBOOT		 0x00
>> +
>> +/****************************************************************************
>> + *
>> + * All definitions of action ID's per messageType.
> 
> No apostrophe.
> 
ok.

>> + *
>> + * 0x00 -> 0xBF actionID used for production use cases.
>> + * 0xC0 -> 0xFF messageType must be reserved for test use cases.
>> + *
>> + * NOTE: Test ID's shouldn't appear in this file.
> 
> No apostrophe in ID's.
> 
ok.

>> + *
>> + * <Template> : TMEL_ACTION_<MSGTYPE_NAME>_<ACTIONID_NAME>
>> + * **************************************************************************/
>> +
>> +/*
>> + * ----------------------------------------------------------------------------
>> +		Action ID's for TMEL_MSG_SECBOOT
>> + * ------------------------------------------------------------------------
>> + */
>> +#define TMEL_ACTION_SECBOOT_SEC_AUTH		     0x04
>> +#define TMEL_ACTION_SECBOOT_SS_TEAR_DOWN	     0x0A
> 
> Uppercase hex.
> 
ok.

>> +
>> +/****************************************************************************
>> + *
>> + * All definitions of TMEL Message UID's (messageType | actionID).
> 
> No apostrophe.
> 
ok.

>> + *
>> + * <Template> : TMEL_MSG_UID_<MSGTYPE_NAME>_<ACTIONID_NAME>
>> + * *************************************************************************/
>> +
>> +/*----------------------------------------------------------------------------
>> + * UID's for TMEL_MSG_SECBOOT
>> + *-------------------------------------------------------------------------
>> + */
>> +#define TMEL_MSG_UID_SECBOOT_SEC_AUTH	    TMEL_MSG_UID_CREATE(TMEL_MSG_SECBOOT,\
>> +					    TMEL_ACTION_SECBOOT_SEC_AUTH)
>> +
>> +#define TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN	TMEL_MSG_UID_CREATE(TMEL_MSG_SECBOOT,\
>> +						TMEL_ACTION_SECBOOT_SS_TEAR_DOWN)
>> +
>> +#define HW_MBOX_SIZE			32
>> +#define MBOX_QMP_CTRL_DATA_SIZE		4
>> +#define MBOX_RSV_SIZE			4
>> +#define MBOX_IPC_PACKET_SIZE		(HW_MBOX_SIZE - MBOX_QMP_CTRL_DATA_SIZE - MBOX_RSV_SIZE)
>> +#define MBOX_IPC_MAX_PARAMS		5
>> +
>> +#define MAX_PARAM_IN_PARAM_ID		14
>> +#define PARAM_CNT_FOR_PARAM_TYPE_OUTBUF	3
>> +#define SRAM_IPC_MAX_PARAMS		(MAX_PARAM_IN_PARAM_ID * PARAM_CNT_FOR_PARAM_TYPE_OUTBUF)
>> +#define SRAM_IPC_MAX_BUF_SIZE		(SRAM_IPC_MAX_PARAMS * sizeof(u32))
>> +
>> +#define TMEL_ERROR_GENERIC		(0x1U)
>> +#define TMEL_ERROR_NOT_SUPPORTED	(0x2U)
>> +#define TMEL_ERROR_BAD_PARAMETER	(0x3U)
>> +#define TMEL_ERROR_BAD_MESSAGE		(0x4U)
>> +#define TMEL_ERROR_BAD_ADDRESS		(0x5U)
>> +#define TMEL_ERROR_TMELCOM_FAILURE	(0x6U)
>> +#define TMEL_ERROR_TMEL_BUSY		(0x7U)
> 
> 
> Please use 'u'.
> 
ok.

>> +
>> +enum ipc_type {
>> +	IPC_MBOX_ONLY,
>> +	IPC_MBOX_SRAM,
>> +};
>> +
>> +struct ipc_header {
>> +	u8 ipc_type:1;
>> +	u8 msg_len:7;
>> +	u8 msg_type;
>> +	u8 action_id;
>> +	s8 response;
>> +} __packed;
>> +
>> +struct mbox_payload {
>> +	u32 param[MBOX_IPC_MAX_PARAMS];
>> +};
>> +
>> +struct sram_payload {
>> +	u32 payload_ptr;
>> +	u32 payload_len;
>> +};
>> +
>> +union ipc_payload {
>> +	struct mbox_payload mbox_payload;
>> +	struct sram_payload sram_payload;
>> +} __packed;
>> +
>> +struct tmel_ipc_pkt {
>> +	struct ipc_header msg_hdr;
>> +	union ipc_payload payload;
>> +} __packed;
>> +
>> +struct tmel_qmp_msg {
>> +	void *msg;
>> +	u32 msg_id;
>> +};
>> +
>> +struct tmel_sec_auth {
>> +	void *data;
>> +	u32 size;
>> +	u32 pas_id;
>> +};
>> +#endif  /*_TMELCOM_H_ */
> 
> Stray space after endif.
> Add space after '/*'.
> 
ok, will fix both.

Regards,
  Sricharan
Sricharan Ramabadhran Jan. 3, 2025, 12:29 p.m. UTC | #7
On 12/31/2024 1:36 PM, Krzysztof Kozlowski wrote:
> On 31/12/2024 06:49, Sricharan R wrote:
>> From: Sricharan Ramabadhran <quic_srichara@quicinc.com>
>>
>> This mailbox facilitates the communication between the TME-L server based
>> subsystems (Q6) and the TME-L client (APPSS/BTSS/AUDIOSS), used for security
> 
> <form letter>
> This is a friendly reminder during the review process.
> 
> It seems my or other reviewer's previous comments were not fully
> addressed. Maybe the feedback got lost between the quotes, maybe you
> just forgot to apply it. Please go back to the previous discussion and
> either implement all requested changes or keep discussing them.
> 
> Thank you.
> </form letter>
> 

Oh, I will recheck and fix the missed comments.

> Still wrong wrapping.
> 
ok, will fix.

>> services like secure image authentication, enable/disable efuses, crypto
>> services. Each client in the   SoC has its own block of message RAM and IRQ
>> for communication with the TME-L SS. The protocol used to communicate in the
>> message RAM is known as Qualcomm Messaging Protocol (QMP).
>>
>> Remote proc driver subscribes to this mailbox and uses the mbox_send_message
>> to use TME-L to securely authenticate/teardown the images.
>>
>> Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
>> ---
>>    [v2] Added worker for mailbox tx processing, since some of the operations can sleep
>>         Fixed checkpatch warnings. Some [CHECK] like below still exist, but that looks
>>         like a false postive.
>>
>>         CHECK: Macro argument 'm' may be better as '(m)' to avoid precedence issues
>>          #1072: FILE: include/linux/mailbox/tmelcom-qmp.h:40:
>>          +#define TMEL_MSG_UID_CREATE(m, a)      ((u32)(((m & 0xff) << 8) | (a & 0xff)))
>>
>>   drivers/mailbox/Kconfig             |   7 +
>>   drivers/mailbox/Makefile            |   2 +
>>   drivers/mailbox/qcom-tmel-qmp.c     | 971 ++++++++++++++++++++++++++++
>>   include/linux/mailbox/tmelcom-qmp.h | 157 +++++
>>   4 files changed, 1137 insertions(+)
>>   create mode 100644 drivers/mailbox/qcom-tmel-qmp.c
>>   create mode 100644 include/linux/mailbox/tmelcom-qmp.h
>>
>> diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
>> index 8ecba7fb999e..8ad0b834d617 100644
>> --- a/drivers/mailbox/Kconfig
>> +++ b/drivers/mailbox/Kconfig
>> @@ -306,4 +306,11 @@ config THEAD_TH1520_MBOX
>>   	  kernel is running, and E902 core used for power management among other
>>   	  things.
>>   
>> +config QCOM_TMEL_QMP_MAILBOX
> 
> Did you just place it at the end instead of gropped or sorted?
> 
Oh, I placed it at the end; will fix it.

>> +	tristate "QCOM Mailbox Protocol(QMP) for TME-L SS"
>> +	help
>> +	  Say yes to add support for the QMP Mailbox Protocol driver for TME-L.
>> +	  QMP is a lightweight communication protocol for sending messages to
>> +	  TME-L. This protocol fits into the Generic Mailbox Framework.
>> +	  QMP uses a mailbox registers.
>>   endif
>> diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
>> index 5f4f5b0ce2cc..4dba283a94ad 100644
>> --- a/drivers/mailbox/Makefile
>> +++ b/drivers/mailbox/Makefile
>> @@ -66,3 +66,5 @@ obj-$(CONFIG_QCOM_CPUCP_MBOX)	+= qcom-cpucp-mbox.o
>>   obj-$(CONFIG_QCOM_IPCC)		+= qcom-ipcc.o
>>   
>>   obj-$(CONFIG_THEAD_TH1520_MBOX)	+= mailbox-th1520.o
>> +
>> +obj-$(CONFIG_QCOM_TMEL_QMP_MAILBOX) += qcom-tmel-qmp.o
> 
> Same problem.
> 
ok, will fix.

>> diff --git a/drivers/mailbox/qcom-tmel-qmp.c b/drivers/mailbox/qcom-tmel-qmp.c
>> new file mode 100644
>> index 000000000000..6de0a418e0ae
>> --- /dev/null
>> +++ b/drivers/mailbox/qcom-tmel-qmp.c
>> @@ -0,0 +1,971 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/*
>> + * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
>> + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
>> + */
>> +
>> +#include <linux/completion.h>
>> +#include <linux/delay.h>
>> +#include <linux/dma-direction.h>
>> +#include <linux/dma-mapping.h>
>> +#include <linux/init.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/io.h>
>> +#include <linux/kernel.h>
>> +#include <linux/kthread.h>
>> +#include <linux/mailbox_client.h>
>> +#include <linux/mailbox_controller.h>
>> +#include <linux/mailbox/tmelcom-qmp.h>
>> +#include <linux/module.h>
>> +#include <linux/of.h>
>> +#include <linux/of_irq.h>
> 
> Not used
> 
ok, will remove.

>> +#include <linux/of_platform.h>
> 
> Looks also not used
>
will remove.

>> +#include <linux/platform_device.h>
>> +#include <linux/spinlock.h>
>> +#include <linux/types.h>
>> +#include <linux/uaccess.h>
>> +#include <linux/uio.h>
>> +#include <linux/workqueue.h>
> 
> 
> Several headers here look unused.
> 
ok, will check and fix.

>> +
>> +#define QMP_NUM_CHANS	0x1
>> +#define QMP_TOUT_MS	1000
>> +#define MBOX_ALIGN_BYTES	3
>> +#define QMP_CTRL_DATA_SIZE	4
>> +#define QMP_MAX_PKT_SIZE	0x18
>> +#define QMP_UCORE_DESC_OFFSET	0x1000
>> +
>> +#define QMP_CH_VAR_GET(mdev, desc, var) ((mdev)->desc.bits.var)
>> +#define QMP_CH_VAR_SET(mdev, desc, var) (mdev)->desc.bits.var = 1
>> +#define QMP_CH_VAR_CLR(mdev, desc, var) (mdev)->desc.bits.var = 0
>> +
>> +#define QMP_MCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, mcore, var)
>> +#define QMP_MCORE_CH_VAR_SET(mdev, var)	QMP_CH_VAR_SET(mdev, mcore, var)
>> +#define QMP_MCORE_CH_VAR_CLR(mdev, var)	QMP_CH_VAR_CLR(mdev, mcore, var)
>> +
>> +#define QMP_MCORE_CH_VAR_TOGGLE(mdev, var) \
>> +	(mdev)->mcore.bits.var = !((mdev)->mcore.bits.var)
>> +#define QMP_MCORE_CH_ACKED_CHECK(mdev, var) \
>> +	((mdev)->ucore.bits.var == (mdev)->mcore.bits.var##_ack)
>> +#define QMP_MCORE_CH_ACK_UPDATE(mdev, var) \
>> +	(mdev)->mcore.bits.var##_ack = (mdev)->ucore.bits.var
>> +#define QMP_MCORE_CH_VAR_ACK_CLR(mdev, var) \
>> +	(mdev)->mcore.bits.var##_ack = 0
>> +
>> +#define QMP_UCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, ucore, var)
>> +#define QMP_UCORE_CH_ACKED_CHECK(mdev, var) \
>> +	((mdev)->mcore.bits.var == (mdev)->ucore.bits.var##_ack)
>> +#define QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, var) \
>> +	((mdev)->ucore.bits.var != (mdev)->mcore.bits.var##_ack)
>> +
>> +/**
>> + * enum qmp_local_state -	definition of the local state machine
>> + * @LINK_DISCONNECTED:		Init state, waiting for ucore to start
>> + * @LINK_NEGOTIATION:		Set local link state to up, wait for ucore ack
>> + * @LINK_CONNECTED:		Link state up, channel not connected
>> + * @LOCAL_CONNECTING:		Channel opening locally, wait for ucore ack
>> + * @CHANNEL_CONNECTED:		Channel fully opened
>> + * @LOCAL_DISCONNECTING:	Channel closing locally, wait for ucore ack
>> + */
>> +enum qmp_local_state {
>> +	LINK_DISCONNECTED,
>> +	LINK_NEGOTIATION,
>> +	LINK_CONNECTED,
>> +	LOCAL_CONNECTING,
>> +	CHANNEL_CONNECTED,
>> +	LOCAL_DISCONNECTING,
>> +};
>> +
>> +union channel_desc {
>> +	struct {
>> +		u32 link_state:1;
>> +		u32 link_state_ack:1;
>> +		u32 ch_state:1;
>> +		u32 ch_state_ack:1;
>> +		u32 tx:1;
>> +		u32 tx_ack:1;
>> +		u32 rx_done:1;
>> +		u32 rx_done_ack:1;
>> +		u32 read_int:1;
>> +		u32 read_int_ack:1;
>> +		u32 reserved:6;
>> +		u32 frag_size:8;
>> +		u32 rem_frag_count:8;
>> +	} bits;
>> +	unsigned int val;
>> +};
>> +
>> +struct qmp_work {
>> +	struct work_struct work;
>> +	void *data;
>> +};
>> +
>> +/**
>> + * struct qmp_device - local information for managing a single mailbox
>> + * @dev:	    The device that corresponds to this mailbox
>> + * @ctrl:	    The mbox controller for this mailbox
>> + * @mcore_desc:	    Local core (APSS) mailbox descriptor
>> + * @ucore_desc:	    Remote core (TME-L) mailbox descriptor
>> + * @mcore:	    Local core (APSS) channel descriptor
>> + * @ucore:	    Remote core (TME-L) channel descriptor
>> + * @rx_pkt:	    Buffer to pass to client, holds received data from mailbox
>> + * @tx_pkt:	    Buffer from client, holds data to send on mailbox
>> + * @mbox_client:    Mailbox client for the IPC interrupt
>> + * @mbox_chan:	    Mailbox client chan for the IPC interrupt
>> + * @local_state:    Current state of mailbox protocol
>> + * @state_lock:	    Serialize mailbox state changes
>> + * @tx_lock:	    Serialize access for writes to mailbox
>> + * @link_complete:  Use to block until link negotiation with remote proc
>> + * @ch_complete:    Use to block until the channel is fully opened
>> + * @dwork:	    Delayed work to detect timed out tx
>> + * @tx_sent:	    True if tx is sent and remote proc has not sent ack
>> + */
>> +struct qmp_device {
>> +	struct device *dev;
>> +	struct mbox_controller ctrl;
>> +	struct qmp_work qwork;
>> +
>> +	void __iomem *mcore_desc;
>> +	void __iomem *ucore_desc;
>> +	union channel_desc mcore;
>> +	union channel_desc ucore;
>> +
>> +	struct kvec rx_pkt;
>> +	struct kvec tx_pkt;
>> +
>> +	struct mbox_client mbox_client;
>> +	struct mbox_chan *mbox_chan;
>> +
>> +	enum qmp_local_state local_state;
>> +
>> +	/* Lock for QMP link state changes */
> 
> Vague
OK, the whole of the locking needs some fixing. In fact, with a few
more changes there is no need to have the two mutexes. I will remove
them and just keep the spinlock for updates to the m/u core registers
and state-machine tracking.

> 
>> +	struct mutex state_lock;
>> +	/* Lock to serialize access to mailbox */
> 
> No, I don't see serialized access to mailbox. I see some parts of access
> being protected. Write descriptive lock descriptions.
> 
 >
Yes, that comment is wrong. The spinlock was meant to protect access to the
m/u core descriptors and state-machine updates. Will fix it in the next version.

>> +	spinlock_t tx_lock;
>> +
>> +	struct completion link_complete;
>> +	struct completion ch_complete;
>> +	struct delayed_work dwork;
>> +	void *data;
>> +
>> +	bool tx_sent;
>> +	bool ch_in_use;
>> +};
>> +
>> +struct tmel_msg_param_type_buf_in {
>> +	u32 buf;
>> +	u32 buf_len;
>> +};
>> +
>> +struct tmel_secboot_sec_auth_req {
>> +	u32 sw_id;
>> +	struct tmel_msg_param_type_buf_in elf_buf;
>> +	struct tmel_msg_param_type_buf_in region_list;
>> +	u32 relocate;
>> +} __packed;
>> +
>> +struct tmel_secboot_sec_auth_resp {
>> +	u32 first_seg_addr;
>> +	u32 first_seg_len;
>> +	u32 entry_addr;
>> +	u32 extended_error;
>> +	u32 status;
>> +} __packed;
>> +
>> +struct tmel_secboot_sec_auth {
>> +	struct tmel_secboot_sec_auth_req req;
>> +	struct tmel_secboot_sec_auth_resp resp;
>> +} __packed;
>> +
>> +struct tmel_secboot_teardown_req {
>> +	u32 sw_id;
>> +	u32 secondary_sw_id;
>> +} __packed;
>> +
>> +struct tmel_secboot_teardown_resp {
>> +	u32 status;
>> +} __packed;
>> +
>> +struct tmel_secboot_teardown {
>> +	struct tmel_secboot_teardown_req req;
>> +	struct tmel_secboot_teardown_resp resp;
>> +} __packed;
>> +
>> +struct tmel {
>> +	struct device *dev;
>> +	struct qmp_device *mdev;
>> +	struct kvec pkt;
>> +	/* To serialize incoming tmel request */
> 
> No, explain what is exactly protected. We all know that mutex serializes...
> 
ok, will remove this mutex in next version.

>> +	struct mutex lock;
>> +	struct tmel_ipc_pkt *ipc_pkt;
>> +	dma_addr_t sram_dma_addr;
>> +	wait_queue_head_t waitq;
>> +	bool rx_done;
>> +};
>> +
>> +static struct tmel *tmeldev;
> 
> NAK, First: it is not needed, second: even if it in this spaghetti code
> it was needed, answer would be drop it and fix your code not to create
> fake singletons.
> 
OK, there is no need to have this global. With the updated design I will
remove it in the next spin and make it instance-specific.

>> +
>> +/**
>> + * qmp_send_irq() - send an irq to a remote entity as an event signal.
>> + * @mdev:       Which remote entity that should receive the irq.
>> + */
>> +static void qmp_send_irq(struct qmp_device *mdev)
>> +{
>> +	/* Update the mcore val to mcore register */
>> +	iowrite32(mdev->mcore.val, mdev->mcore_desc);
>> +	/* Ensure desc update is visible before IPC */
>> +	wmb();
>> +
>> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
>> +		mdev->mcore.val, mdev->ucore.val);
>> +
>> +	mbox_send_message(mdev->mbox_chan, NULL);
>> +	mbox_client_txdone(mdev->mbox_chan, 0);
>> +}
>> +
> 
> ...
> 
>> +static irqreturn_t qmp_irq_handler(int irq, void *priv)
>> +{
>> +	struct qmp_device *mdev = (struct qmp_device *)priv;
>> +
>> +	qmp_rx(mdev);
>> +
>> +	return IRQ_HANDLED;
>> +}
>> +
>> +static int tmel_qmp_parse_devicetree(struct platform_device *pdev,
>> +				     struct qmp_device *mdev)
> 
> 
> Probe functions are always next to each other, not in other part of unit.
> 
ok, will fix it.

>> +{
>> +	struct device *dev = &pdev->dev;
>> +
>> +	mdev->mcore_desc = devm_platform_ioremap_resource(pdev, 0);
>> +	if (!mdev->mcore_desc) {
>> +		dev_err(dev, "ioremap failed for mcore reg\n");
>> +		return -EIO;
>> +	}
>> +
>> +	mdev->ucore_desc = mdev->mcore_desc + QMP_UCORE_DESC_OFFSET;
>> +
>> +	mdev->mbox_client.dev = dev;
>> +	mdev->mbox_client.knows_txdone = false;
>> +	mdev->mbox_chan = mbox_request_channel(&mdev->mbox_client, 0);
>> +	if (IS_ERR(mdev->mbox_chan)) {
>> +		dev_err(dev, "mbox chan for IPC is missing\n");
> 
> Syntax is: return dev_err_probe().
> 
ok, will fix it.

>> +		return PTR_ERR(mdev->mbox_chan);
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +static void tmel_qmp_remove(struct platform_device *pdev)
> 
> Again, why remove call is not next to probe?
> 
ok, will fix it.

>> +{
>> +	struct qmp_device *mdev = platform_get_drvdata(pdev);
>> +
>> +	mbox_controller_unregister(&mdev->ctrl);
>> +	kfree(mdev->rx_pkt.iov_base);
> 
> I don't see this being allocated in probe.
> 
Oh, wrong place. Will fix and update with the right kfree() calls.

>> +}
>> +
>> +static struct device *tmel_get_device(void)
>> +{
>> +	struct tmel *tdev = tmeldev;
> 
> Nope. Do not create singletons.
> 
ok, yeah, will be removed in next version.

>> +
>> +	if (!tdev)
>> +		return NULL;
>> +
>> +	return tdev->dev;
>> +}
>> +
>> +static int tmel_prepare_msg(struct tmel *tdev, u32 msg_uid,
>> +			    void *msg_buf, size_t msg_size)
>> +{
>> +	struct tmel_ipc_pkt *ipc_pkt = tdev->ipc_pkt;
>> +	struct ipc_header *msg_hdr = &ipc_pkt->msg_hdr;
>> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
>> +	struct sram_payload *sram_payload = &ipc_pkt->payload.sram_payload;
>> +	int ret;
>> +
>> +	memset(ipc_pkt, 0, sizeof(struct tmel_ipc_pkt));
>> +
>> +	msg_hdr->msg_type = TMEL_MSG_UID_MSG_TYPE(msg_uid);
>> +	msg_hdr->action_id = TMEL_MSG_UID_ACTION_ID(msg_uid);
>> +
>> +	pr_debug("uid: %d, msg_size: %zu msg_type:%d, action_id:%d\n",
>> +		 msg_uid, msg_size, msg_hdr->msg_type, msg_hdr->action_id);
> 
> dev_dbg, stop using pr_ everywhere
> 
ok.

>> +
>> +	if (sizeof(struct ipc_header) + msg_size <= MBOX_IPC_PACKET_SIZE) {
>> +		/* Mbox only */
>> +		msg_hdr->ipc_type = IPC_MBOX_ONLY;
>> +		msg_hdr->msg_len = msg_size;
>> +		memcpy((void *)mbox_payload, msg_buf, msg_size);
>> +	} else if (msg_size <= SRAM_IPC_MAX_BUF_SIZE) {
>> +		/* SRAM */
>> +		msg_hdr->ipc_type = IPC_MBOX_SRAM;
>> +		msg_hdr->msg_len = 8;
>> +
>> +		tdev->sram_dma_addr = dma_map_single(tdev->dev, msg_buf,
>> +						     msg_size,
>> +						     DMA_BIDIRECTIONAL);
>> +		ret = dma_mapping_error(tdev->dev, tdev->sram_dma_addr);
>> +		if (ret != 0) {
>> +			pr_err("SRAM DMA mapping error: %d\n", ret);
>> +			return ret;
>> +		}
>> +
>> +		sram_payload->payload_ptr = tdev->sram_dma_addr;
>> +		sram_payload->payload_len = msg_size;
>> +	} else {
>> +		pr_err("Invalid payload length: %zu\n", msg_size);
> 
> No, dev_err
> 
ok.

>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +static void tmel_unprepare_message(struct tmel *tdev,
>> +				   void *msg_buf, size_t msg_size)
>> +{
>> +	struct tmel_ipc_pkt *ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
>> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
>> +
>> +	if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_ONLY) {
>> +		memcpy(msg_buf, (void *)mbox_payload, msg_size);
>> +	} else if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_SRAM) {
>> +		dma_unmap_single(tdev->dev, tdev->sram_dma_addr, msg_size,
>> +				 DMA_BIDIRECTIONAL);
>> +		tdev->sram_dma_addr = 0;
>> +	}
>> +}
>> +
>> +static bool tmel_rx_done(struct tmel *tdev)
>> +{
>> +	return tdev->rx_done;
>> +}
>> +
>> +static int tmel_process_request(u32 msg_uid, void *msg_buf,
>> +				size_t msg_size)
>> +{
>> +	struct tmel *tdev = tmeldev;
>> +	unsigned long jiffies;
>> +	struct tmel_ipc_pkt *resp_ipc_pkt;
>> +	long time_left = 0;
>> +	int ret = 0;
>> +
>> +	/*
>> +	 * Check to handle if probe is not successful or not completed yet
>> +	 */
> 
> No, it is impossible condition. This code cannot be called before probe.
> 
> Clean up your driver from such spaghetti prevention code and unspaghetti
> it, so you will understand the code flow.
> 
OK, sure. That condition/comment is a leftover from initial debugging. Will remove it.

> 
>> +	if (!tdev) {
>> +		pr_err("tmel dev is NULL\n");
>> +		return -ENODEV;
>> +	}
>> +
>> +	if (!msg_buf || !msg_size) {
>> +		pr_err("Invalid msg_buf or msg_size\n");
> 
> No, use dev_err. This applies everywhere.
> 
ok.

>> +		return -EINVAL;
>> +	}
>> +
>> +	mutex_lock(&tdev->lock);
>> +	tdev->rx_done = false;
>> +
>> +	ret = tmel_prepare_msg(tdev, msg_uid, msg_buf, msg_size);
>> +	if (ret)
>> +		return ret;
>> +
>> +	tdev->pkt.iov_len = sizeof(struct tmel_ipc_pkt);
>> +	tdev->pkt.iov_base = (void *)tdev->ipc_pkt;
>> +
>> +	qmp_send_data(tdev->mdev, &tdev->pkt);
>> +	jiffies = msecs_to_jiffies(30000);
>> +
>> +	time_left = wait_event_interruptible_timeout(tdev->waitq,
>> +						     tmel_rx_done(tdev),
>> +						     jiffies);
>> +
>> +	if (!time_left) {
>> +		pr_err("Request timed out\n");
>> +		ret = -ETIMEDOUT;
>> +		goto err_exit;
>> +	}
>> +
>> +	if (tdev->pkt.iov_len != sizeof(struct tmel_ipc_pkt)) {
>> +		pr_err("Invalid pkt.size received size: %lu, expected: %zu\n",
>> +		       tdev->pkt.iov_len, sizeof(struct tmel_ipc_pkt));
>> +		ret = -EPROTO;
>> +		goto err_exit;
>> +	}
>> +
>> +	resp_ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
>> +	tmel_unprepare_message(tdev, msg_buf, msg_size);
>> +	tdev->rx_done = false;
>> +	ret = resp_ipc_pkt->msg_hdr.response;
>> +
>> +err_exit:
>> +	mutex_unlock(&tdev->lock);
>> +	return ret;
>> +}
>> +
>> +static int tmel_secboot_sec_auth(u32 sw_id, void *metadata, size_t size)
>> +{
>> +	struct device *dev = tmel_get_device();
>> +	struct tmel_secboot_sec_auth *msg;
>> +	dma_addr_t elf_buf_phys;
>> +	void *elf_buf;
>> +	int ret;
>> +
>> +	if (!dev || !metadata)
>> +		return -EINVAL;
>> +
>> +	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
>> +
>> +	elf_buf = dma_alloc_coherent(dev, size, &elf_buf_phys, GFP_KERNEL);
>> +	if (!elf_buf)
>> +		return -ENOMEM;
>> +
>> +	memcpy(elf_buf, metadata, size);
>> +
>> +	msg->req.sw_id = sw_id;
>> +	msg->req.elf_buf.buf = (u32)elf_buf_phys;
>> +	msg->req.elf_buf.buf_len = (u32)size;
>> +
>> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SEC_AUTH, msg,
>> +				   sizeof(struct tmel_secboot_sec_auth));
>> +	if (ret) {
>> +		pr_err("Failed to send IPC: %d\n", ret);
>> +	} else if (msg->resp.status || msg->resp.extended_error) {
>> +		pr_err("Failed with status: %d error: %d\n",
>> +		       msg->resp.status, msg->resp.extended_error);
>> +		ret = msg->resp.status;
>> +	}
>> +
>> +	kfree(msg);
>> +	dma_free_coherent(dev, size, elf_buf, elf_buf_phys);
>> +
>> +	return ret;
>> +}
>> +
>> +static int tmel_secboot_teardown(u32 sw_id, u32 secondary_sw_id)
>> +{
>> +	struct device *dev = tmel_get_device();
>> +	struct tmel_secboot_teardown msg = {0};
>> +	int ret;
>> +
>> +	if (!dev)
>> +		return -EINVAL;
>> +
>> +	msg.req.sw_id = sw_id;
>> +	msg.req.secondary_sw_id = secondary_sw_id;
>> +	msg.resp.status = TMEL_ERROR_GENERIC;
>> +
>> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN, &msg,
>> +				   sizeof(msg));
>> +	if (ret) {
>> +		pr_err("Failed to send IPC: %d\n", ret);
>> +	} else if (msg.resp.status) {
>> +		pr_err("Failed with status: %d\n", msg.resp.status);
>> +		ret = msg.resp.status;
>> +	}
>> +
>> +	return ret;
>> +}
>> +
>> +static int tmel_init(struct qmp_device *mdev)
>> +{
>> +	struct tmel *tdev;
>> +
>> +	tdev = devm_kzalloc(mdev->dev, sizeof(*tdev), GFP_KERNEL);
>> +	if (!tdev)
>> +		return -ENOMEM;
>> +
>> +	mutex_init(&tdev->lock);
>> +
>> +	tdev->ipc_pkt = devm_kzalloc(mdev->dev, sizeof(struct tmel_ipc_pkt),
>> +				     GFP_KERNEL);
>> +	if (!tdev->ipc_pkt)
>> +		return -ENOMEM;
>> +
>> +	init_waitqueue_head(&tdev->waitq);
>> +
>> +	tdev->rx_done = false;
>> +	tdev->dev = mdev->dev;
>> +
>> +	tmeldev = tdev;
>> +	tmeldev->mdev = mdev;
>> +
>> +	return 0;
>> +}
>> +
>> +static int tmel_qmp_send(struct mbox_chan *chan, void *data)
>> +{
>> +	struct qmp_device *mdev = chan->con_priv;
>> +
>> +	mdev->qwork.data =  data;
>> +
>> +	queue_work(system_wq, &mdev->qwork.work);
>> +
>> +	return 0;
>> +}
>> +
>> +static void tmel_qmp_send_work(struct work_struct *work)
>> +{
>> +	struct qmp_work *qwork = container_of(work, struct qmp_work, work);
>> +	struct qmp_device *mdev = tmeldev->mdev;
>> +	struct mbox_chan *chan = &mdev->ctrl.chans[0];
>> +
>> +	struct tmel_qmp_msg *tmsg = qwork->data;
>> +	struct tmel_sec_auth *smsg = tmsg->msg;
>> +	int ret;
>> +
>> +	switch (tmsg->msg_id) {
>> +	case TMEL_MSG_UID_SECBOOT_SEC_AUTH:
>> +		ret = tmel_secboot_sec_auth(smsg->pas_id,
>> +					    smsg->data,
>> +					    smsg->size);
>> +		break;
>> +	case TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN:
>> +		ret = tmel_secboot_teardown(smsg->pas_id, 0);
>> +		break;
>> +	}
>> +
>> +	mbox_chan_txdone(chan, 0);
>> +}
>> +
>> +/**
>> + * tmel_qmp_mbox_of_xlate() - Returns a mailbox channel to be used for this mailbox
>> + *		      device. Make sure the channel is not already in use.
>> + * @mbox:       Mailbox device controlls the requested channel.
>> + * @spec:       Device tree arguments to specify which channel is requested.
>> + */
>> +static struct mbox_chan *tmel_qmp_mbox_of_xlate(struct mbox_controller *mbox,
>> +						const struct of_phandle_args *spec)
>> +{
>> +	struct qmp_device *mdev = dev_get_drvdata(mbox->dev);
>> +	unsigned int channel = spec->args[0];
>> +
>> +	if (!mdev)
>> +		return ERR_PTR(-EPROBE_DEFER);
>> +
>> +	if (channel >= mbox->num_chans)
>> +		return ERR_PTR(-EINVAL);
>> +
>> +	mutex_lock(&mdev->state_lock);
>> +	if (mdev->ch_in_use) {
>> +		dev_err(mdev->dev, "mbox channel already in use\n");
>> +		mutex_unlock(&mdev->state_lock);
>> +		return ERR_PTR(-EBUSY);
> 
> 
> Why one cannot call xlate twice for the same argument? This looks wrong.
> Xlate does not mean that mailbox is being used.
> 
Yes, the same channel could be used for different services, so this condition
needs to be removed. Will fix it.

> 
>> +	}
>> +	mdev->ch_in_use = true;
>> +	mutex_unlock(&mdev->state_lock);
>> +
>> +	return &mbox->chans[0];
>> +}
>> +
>> +static struct mbox_chan_ops tmel_qmp_ops = {
>> +	.startup = tmel_qmp_startup,
>> +	.shutdown = tmel_qmp_shutdown,
>> +	.send_data = tmel_qmp_send,
>> +};
>> +
>> +static int tmel_qmp_probe(struct platform_device *pdev)
>> +{
>> +	struct device_node *node = pdev->dev.of_node;
>> +	struct mbox_chan *chans;
>> +	struct qmp_device *mdev;
>> +	int ret = 0;
>> +
>> +	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
>> +	if (!mdev)
>> +		return -ENOMEM;
>> +
>> +	platform_set_drvdata(pdev, mdev);
>> +
>> +	ret = tmel_qmp_parse_devicetree(pdev, mdev);
>> +	if (ret)
>> +		return ret;
>> +
>> +	mdev->dev = &pdev->dev;
>> +
>> +	chans = devm_kzalloc(mdev->dev,
> 
> 
> devm_kcalloc
> 
ok.

>> +			     sizeof(*chans) * QMP_NUM_CHANS, GFP_KERNEL);
>> +	if (!chans)
>> +		return -ENOMEM;
>> +
>> +	INIT_WORK(&mdev->qwork.work, tmel_qmp_send_work);
>> +
>> +	mdev->ctrl.dev = &pdev->dev;
>> +	mdev->ctrl.ops = &tmel_qmp_ops;
>> +	mdev->ctrl.chans = chans;
>> +	chans[0].con_priv = mdev;
>> +	mdev->ctrl.num_chans = QMP_NUM_CHANS;
>> +	mdev->ctrl.txdone_irq = true;
>> +	mdev->ctrl.of_xlate = tmel_qmp_mbox_of_xlate;
>> +
>> +	ret = mbox_controller_register(&mdev->ctrl);
>> +	if (ret) {
>> +		dev_err(mdev->dev, "failed to register mbox controller\n");
>> +		return ret;
>> +	}
>> +
>> +	spin_lock_init(&mdev->tx_lock);
>> +	mutex_init(&mdev->state_lock);
>> +	mdev->local_state = LINK_DISCONNECTED;
>> +	init_completion(&mdev->link_complete);
>> +	init_completion(&mdev->ch_complete);
>> +
>> +	INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout);
>> +
>> +	ret = platform_get_irq(pdev, 0);
>> +
>> +	ret = devm_request_threaded_irq(mdev->dev, ret,
>> +					NULL, qmp_irq_handler,
>> +					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
>> +					node->name, (void *)mdev);
>> +	if (ret < 0) {
>> +		dev_err(mdev->dev, "request threaded irq failed, ret %d\n",
>> +			ret);
> 
> dev_err_probe
> 
ok.

>> +
> 
> 
> 
> 
>> +
>> +static const struct of_device_id tmel_qmp_dt_match[] = {
>> +	{ .compatible = "qcom,ipq5424-tmel-qmp" },
>> +	{},
>> +};
>> +
>> +static struct platform_driver tmel_qmp_driver = {
>> +	.driver = {
>> +		.name = "tmel_qmp_mbox",
>> +		.of_match_table = tmel_qmp_dt_match,
>> +	},
>> +	.probe = tmel_qmp_probe,
>> +	.remove = tmel_qmp_remove,
>> +};
>> +module_platform_driver(tmel_qmp_driver);
>> +
>> +MODULE_DESCRIPTION("QCOM TMEL QMP DRIVER");
> 
> "driver", this is not an acronym.
> 
ok.

Regards,
  Sricharan
Sricharan Ramabadhran Jan. 6, 2025, 10:29 a.m. UTC | #8
On 12/31/2024 9:52 PM, Dmitry Baryshkov wrote:
> On Tue, Dec 31, 2024 at 11:19:00AM +0530, Sricharan R wrote:
>> From: Sricharan Ramabadhran <quic_srichara@quicinc.com>
>>
>> This mailbox facilitates the communication between the TME-L server based
>> subsystems (Q6) and the TME-L client (APPSS/BTSS/AUDIOSS), used for security
>> services like secure image authentication, enable/disable efuses, crypto
>> services. Each client in the   SoC has its own block of message RAM and IRQ
>> for communication with the TME-L SS. The protocol used to communicate in the
>> message RAM is known as Qualcomm Messaging Protocol (QMP).
>>
>> Remote proc driver subscribes to this mailbox and uses the mbox_send_message
>> to use TME-L to securely authenticate/teardown the images.
> 
> You seem to be doing a lot of plays with __iomem-related data. Are you
> sure your driver passes sparse checks?
> 
ok, will check this and also lock_debugging etc.

>>
>> Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
>> ---
>>    [v2] Added worker for mailbox tx processing, since some of the operations can sleep
>>         Fixed checkpatch warnings. Some [CHECK] like below still exist, but that looks
>>         like a false postive.
>>
>>         CHECK: Macro argument 'm' may be better as '(m)' to avoid precedence issues
>>          #1072: FILE: include/linux/mailbox/tmelcom-qmp.h:40:
>>          +#define TMEL_MSG_UID_CREATE(m, a)      ((u32)(((m & 0xff) << 8) | (a & 0xff)))
> 
> It is not, please implement the suggestion.
ok.

> 
>>
>>   drivers/mailbox/Kconfig             |   7 +
>>   drivers/mailbox/Makefile            |   2 +
>>   drivers/mailbox/qcom-tmel-qmp.c     | 971 ++++++++++++++++++++++++++++
>>   include/linux/mailbox/tmelcom-qmp.h | 157 +++++
>>   4 files changed, 1137 insertions(+)
>>   create mode 100644 drivers/mailbox/qcom-tmel-qmp.c
>>   create mode 100644 include/linux/mailbox/tmelcom-qmp.h
>>
>> diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
>> index 8ecba7fb999e..8ad0b834d617 100644
>> --- a/drivers/mailbox/Kconfig
>> +++ b/drivers/mailbox/Kconfig
>> @@ -306,4 +306,11 @@ config THEAD_TH1520_MBOX
>>   	  kernel is running, and E902 core used for power management among other
>>   	  things.
>>   
>> +config QCOM_TMEL_QMP_MAILBOX
>> +	tristate "QCOM Mailbox Protocol(QMP) for TME-L SS"
> 
> What is TME-L (or TMEL) SS? AmSamoa? South Sudan? ß? Schutzstaffel?
Trust Management Engine-Lite subsystem; will add this to the help text.

> 
>> +	help
>> +	  Say yes to add support for the QMP Mailbox Protocol driver for TME-L.
>> +	  QMP is a lightweight communication protocol for sending messages to
>> +	  TME-L. This protocol fits into the Generic Mailbox Framework.
>> +	  QMP uses a mailbox registers.
>>   endif
>> diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
>> index 5f4f5b0ce2cc..4dba283a94ad 100644
>> --- a/drivers/mailbox/Makefile
>> +++ b/drivers/mailbox/Makefile
>> @@ -66,3 +66,5 @@ obj-$(CONFIG_QCOM_CPUCP_MBOX)	+= qcom-cpucp-mbox.o
>>   obj-$(CONFIG_QCOM_IPCC)		+= qcom-ipcc.o
>>   
>>   obj-$(CONFIG_THEAD_TH1520_MBOX)	+= mailbox-th1520.o
>> +
>> +obj-$(CONFIG_QCOM_TMEL_QMP_MAILBOX) += qcom-tmel-qmp.o
>> diff --git a/drivers/mailbox/qcom-tmel-qmp.c b/drivers/mailbox/qcom-tmel-qmp.c
>> new file mode 100644
>> index 000000000000..6de0a418e0ae
>> --- /dev/null
>> +++ b/drivers/mailbox/qcom-tmel-qmp.c
>> @@ -0,0 +1,971 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/*
>> + * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
>> + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
>> + */
>> +
>> +#include <linux/completion.h>
>> +#include <linux/delay.h>
>> +#include <linux/dma-direction.h>
>> +#include <linux/dma-mapping.h>
>> +#include <linux/init.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/io.h>
>> +#include <linux/kernel.h>
>> +#include <linux/kthread.h>
>> +#include <linux/mailbox_client.h>
>> +#include <linux/mailbox_controller.h>
>> +#include <linux/mailbox/tmelcom-qmp.h>
>> +#include <linux/module.h>
>> +#include <linux/of.h>
>> +#include <linux/of_irq.h>
>> +#include <linux/of_platform.h>
>> +#include <linux/platform_device.h>
>> +#include <linux/spinlock.h>
>> +#include <linux/types.h>
>> +#include <linux/uaccess.h>
>> +#include <linux/uio.h>
>> +#include <linux/workqueue.h>
>> +
>> +#define QMP_NUM_CHANS	0x1
>> +#define QMP_TOUT_MS	1000
>> +#define MBOX_ALIGN_BYTES	3
>> +#define QMP_CTRL_DATA_SIZE	4
>> +#define QMP_MAX_PKT_SIZE	0x18
>> +#define QMP_UCORE_DESC_OFFSET	0x1000
>> +
>> +#define QMP_CH_VAR_GET(mdev, desc, var) ((mdev)->desc.bits.var)
>> +#define QMP_CH_VAR_SET(mdev, desc, var) (mdev)->desc.bits.var = 1
>> +#define QMP_CH_VAR_CLR(mdev, desc, var) (mdev)->desc.bits.var = 0
> 
> Inline.
ok.

> 
>> +
>> +#define QMP_MCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, mcore, var)
>> +#define QMP_MCORE_CH_VAR_SET(mdev, var)	QMP_CH_VAR_SET(mdev, mcore, var)
>> +#define QMP_MCORE_CH_VAR_CLR(mdev, var)	QMP_CH_VAR_CLR(mdev, mcore, var)
> 
> Inline. No wrappers around wrappers around wrappers.
ok.

> 
>> +
>> +#define QMP_MCORE_CH_VAR_TOGGLE(mdev, var) \
>> +	(mdev)->mcore.bits.var = !((mdev)->mcore.bits.var)
>> +#define QMP_MCORE_CH_ACKED_CHECK(mdev, var) \
>> +	((mdev)->ucore.bits.var == (mdev)->mcore.bits.var##_ack)
>> +#define QMP_MCORE_CH_ACK_UPDATE(mdev, var) \
>> +	(mdev)->mcore.bits.var##_ack = (mdev)->ucore.bits.var
>> +#define QMP_MCORE_CH_VAR_ACK_CLR(mdev, var) \
>> +	(mdev)->mcore.bits.var##_ack = 0
> 
> Ugh, no.
> 
Oh ok, will format it better.

>> +
>> +#define QMP_UCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, ucore, var)
>> +#define QMP_UCORE_CH_ACKED_CHECK(mdev, var) \
>> +	((mdev)->mcore.bits.var == (mdev)->ucore.bits.var##_ack)
>> +#define QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, var) \
>> +	((mdev)->ucore.bits.var != (mdev)->mcore.bits.var##_ack)
>> +
>> +/**
>> + * enum qmp_local_state -	definition of the local state machine
>> + * @LINK_DISCONNECTED:		Init state, waiting for ucore to start
> 
> What is ucore?
> 
ucore and mcore refer to the outbound and inbound descriptors for the
remote TMEL. The SoC client sends IPC requests to TMEL over the
mcore channel descriptor and receives the IPC data back from TMEL
over the ucore.

>> + * @LINK_NEGOTIATION:		Set local link state to up, wait for ucore ack
>> + * @LINK_CONNECTED:		Link state up, channel not connected
>> + * @LOCAL_CONNECTING:		Channel opening locally, wait for ucore ack
>> + * @CHANNEL_CONNECTED:		Channel fully opened
>> + * @LOCAL_DISCONNECTING:	Channel closing locally, wait for ucore ack
> 
> Unindent, please. At least the top line shouldn't have it.
> 
ok.

>> + */
>> +enum qmp_local_state {
>> +	LINK_DISCONNECTED,
>> +	LINK_NEGOTIATION,
>> +	LINK_CONNECTED,
>> +	LOCAL_CONNECTING,
>> +	CHANNEL_CONNECTED,
>> +	LOCAL_DISCONNECTING,
>> +};
>> +
>> +union channel_desc {
>> +	struct {
>> +		u32 link_state:1;
>> +		u32 link_state_ack:1;
>> +		u32 ch_state:1;
>> +		u32 ch_state_ack:1;
>> +		u32 tx:1;
>> +		u32 tx_ack:1;
>> +		u32 rx_done:1;
>> +		u32 rx_done_ack:1;
>> +		u32 read_int:1;
>> +		u32 read_int_ack:1;
>> +		u32 reserved:6;
>> +		u32 frag_size:8;
>> +		u32 rem_frag_count:8;
>> +	} bits;
>> +	unsigned int val;
>> +};
>> +
>> +struct qmp_work {
>> +	struct work_struct work;
>> +	void *data;
>> +};
>> +
>> +/**
>> + * struct qmp_device - local information for managing a single mailbox
>> + * @dev:	    The device that corresponds to this mailbox
>> + * @ctrl:	    The mbox controller for this mailbox
>> + * @mcore_desc:	    Local core (APSS) mailbox descriptor
>> + * @ucore_desc:	    Remote core (TME-L) mailbox descriptor
>> + * @mcore:	    Local core (APSS) channel descriptor
>> + * @ucore:	    Remote core (TME-L) channel descriptor
>> + * @rx_pkt:	    Buffer to pass to client, holds received data from mailbox
>> + * @tx_pkt:	    Buffer from client, holds data to send on mailbox
>> + * @mbox_client:    Mailbox client for the IPC interrupt
>> + * @mbox_chan:	    Mailbox client chan for the IPC interrupt
>> + * @local_state:    Current state of mailbox protocol
>> + * @state_lock:	    Serialize mailbox state changes
>> + * @tx_lock:	    Serialize access for writes to mailbox
>> + * @link_complete:  Use to block until link negotiation with remote proc
>> + * @ch_complete:    Use to block until the channel is fully opened
>> + * @dwork:	    Delayed work to detect timed out tx
>> + * @tx_sent:	    True if tx is sent and remote proc has not sent ack
>> + */
>> +struct qmp_device {
>> +	struct device *dev;
>> +	struct mbox_controller ctrl;
>> +	struct qmp_work qwork;
>> +
>> +	void __iomem *mcore_desc;
>> +	void __iomem *ucore_desc;
>> +	union channel_desc mcore;
>> +	union channel_desc ucore;
>> +
>> +	struct kvec rx_pkt;
>> +	struct kvec tx_pkt;
>> +
>> +	struct mbox_client mbox_client;
>> +	struct mbox_chan *mbox_chan;
>> +
>> +	enum qmp_local_state local_state;
>> +
>> +	/* Lock for QMP link state changes */
>> +	struct mutex state_lock;
>> +	/* Lock to serialize access to mailbox */
>> +	spinlock_t tx_lock;
>> +
>> +	struct completion link_complete;
>> +	struct completion ch_complete;
>> +	struct delayed_work dwork;
>> +	void *data;
>> +
>> +	bool tx_sent;
>> +	bool ch_in_use;
>> +};
>> +
>> +struct tmel_msg_param_type_buf_in {
>> +	u32 buf;
>> +	u32 buf_len;
>> +};
>> +
>> +struct tmel_secboot_sec_auth_req {
>> +	u32 sw_id;
>> +	struct tmel_msg_param_type_buf_in elf_buf;
>> +	struct tmel_msg_param_type_buf_in region_list;
>> +	u32 relocate;
>> +} __packed;
>> +
>> +struct tmel_secboot_sec_auth_resp {
>> +	u32 first_seg_addr;
>> +	u32 first_seg_len;
>> +	u32 entry_addr;
>> +	u32 extended_error;
>> +	u32 status;
>> +} __packed;
>> +
>> +struct tmel_secboot_sec_auth {
>> +	struct tmel_secboot_sec_auth_req req;
>> +	struct tmel_secboot_sec_auth_resp resp;
>> +} __packed;
>> +
>> +struct tmel_secboot_teardown_req {
>> +	u32 sw_id;
>> +	u32 secondary_sw_id;
>> +} __packed;
>> +
>> +struct tmel_secboot_teardown_resp {
>> +	u32 status;
>> +} __packed;
>> +
>> +struct tmel_secboot_teardown {
>> +	struct tmel_secboot_teardown_req req;
>> +	struct tmel_secboot_teardown_resp resp;
>> +} __packed;
>> +
>> +struct tmel {
>> +	struct device *dev;
>> +	struct qmp_device *mdev;
>> +	struct kvec pkt;
>> +	/* To serialize incoming tmel request */
>> +	struct mutex lock;
>> +	struct tmel_ipc_pkt *ipc_pkt;
>> +	dma_addr_t sram_dma_addr;
>> +	wait_queue_head_t waitq;
>> +	bool rx_done;
>> +};
>> +
>> +static struct tmel *tmeldev;
> 
> What? Unprotected global static variable? Why do you need it at all?
> Drop immediately.
> 
 >
Ok, no need to have this global. With the updated design, will remove this
in the next spin and make it instance-specific.

>> +
>> +/**
>> + * qmp_send_irq() - send an irq to a remote entity as an event signal.
>> + * @mdev:       Which remote entity that should receive the irq.
>> + */
>> +static void qmp_send_irq(struct qmp_device *mdev)
>> +{
>> +	/* Update the mcore val to mcore register */
> 
> What is the use for such comments?
> 
Agree, this came in for some initial debugging; will remove.

>> +	iowrite32(mdev->mcore.val, mdev->mcore_desc);
>> +	/* Ensure desc update is visible before IPC */
>> +	wmb();
>> +
>> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
>> +		mdev->mcore.val, mdev->ucore.val);
>> +
>> +	mbox_send_message(mdev->mbox_chan, NULL);
>> +	mbox_client_txdone(mdev->mbox_chan, 0);
>> +}
>> +
>> +/**
>> + * qmp_notify_timeout() - Notify client of tx timeout with -ETIME
>> + * @work:		  Structure for work that was scheduled.
>> + */
>> +static void qmp_notify_timeout(struct work_struct *work)
>> +{
>> +	struct delayed_work *dwork = to_delayed_work(work);
>> +	struct qmp_device *mdev = container_of(dwork, struct qmp_device, dwork);
>> +	struct mbox_chan *chan = &mdev->ctrl.chans[0];
>> +	int err = -ETIME;
>> +	unsigned long flags;
>> +
>> +	spin_lock_irqsave(&mdev->tx_lock, flags);
>> +	if (!mdev->tx_sent) {
>> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
>> +		return;
>> +	}
>> +	mdev->tx_sent = false;
>> +	spin_unlock_irqrestore(&mdev->tx_lock, flags);
>> +	dev_dbg(mdev->dev, "%s: TX timeout", __func__);
>> +	mbox_chan_txdone(chan, err);
>> +}
>> +
>> +static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev)
>> +{
>> +	schedule_delayed_work(&mdev->dwork, msecs_to_jiffies(QMP_TOUT_MS));
>> +}
>> +
>> +/**
>> + * tmel_qmp_startup() - Start qmp mailbox channel for communication. Waits for
>> + *		       remote subsystem to open channel if link is not
>> + *		       initated or until timeout.
>> + * @chan:	       mailbox channel that is being opened.
>> + *
>> + * Return: 0 on succes or standard Linux error code.
>> + */
>> +static int tmel_qmp_startup(struct mbox_chan *chan)
>> +{
>> +	struct qmp_device *mdev = chan->con_priv;
>> +	int ret;
>> +
>> +	if (!mdev)
>> +		return -EINVAL;
> 
> Is it a real case or just protective coding?
> 
will remove.

>> +
>> +	ret = wait_for_completion_timeout(&mdev->link_complete,
>> +					  msecs_to_jiffies(QMP_TOUT_MS));
>> +	if (!ret)
>> +		return -EAGAIN;
>> +
>> +	mutex_lock(&mdev->state_lock);
>> +	if (mdev->local_state == LINK_CONNECTED) {
>> +		QMP_MCORE_CH_VAR_SET(mdev, ch_state);
>> +		mdev->local_state = LOCAL_CONNECTING;
>> +		dev_dbg(mdev->dev, "link complete, local connecting");
>> +		qmp_send_irq(mdev);
>> +	}
>> +	mutex_unlock(&mdev->state_lock);
>> +
>> +	ret = wait_for_completion_timeout(&mdev->ch_complete,
>> +					  msecs_to_jiffies(QMP_TOUT_MS));
>> +	if (!ret)
>> +		return -ETIME;
>> +
>> +	return 0;
>> +}
>> +
>> +/**
>> + * qmp_send_data() - Copy the data to the channel's mailbox and notify
>> + *		     remote subsystem of new data. This function will
>> + *		     return an error if the previous message sent has
>> + *		     not been read. Cannot Sleep.
>> + * @chan:	mailbox channel that data is to be sent over.
>> + * @data:	Data to be sent to remote processor, should be in the format of
>> + *		a kvec.
>> + *
>> + * Return: 0 on succes or standard Linux error code.
>> + */
>> +static int qmp_send_data(struct qmp_device *mdev, void *data)
>> +{
>> +	struct kvec *pkt = (struct kvec *)data;
>> +	void __iomem *addr;
>> +	unsigned long flags;
>> +
>> +	if (!mdev || !data || !completion_done(&mdev->ch_complete))
>> +		return -EINVAL;
> 
> Is it a real case or just protective coding?
> 
Will remove; should not be required, as it is already taken care of in mbox startup.

>> +
>> +	if (pkt->iov_len > QMP_MAX_PKT_SIZE) {
>> +		dev_err(mdev->dev, "Unsupported packet size %lu\n", pkt->iov_len);
>> +		return -EINVAL;
>> +	}
>> +
>> +	spin_lock_irqsave(&mdev->tx_lock, flags);
>> +	if (mdev->tx_sent) {
>> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
>> +		return -EAGAIN;
>> +	}
>> +
>> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
>> +		mdev->mcore.val, mdev->ucore.val);
>> +
>> +	addr = mdev->mcore_desc + QMP_CTRL_DATA_SIZE;
>> +	memcpy_toio(addr, pkt->iov_base, pkt->iov_len);
>> +
>> +	mdev->mcore.bits.frag_size = pkt->iov_len;
>> +	mdev->mcore.bits.rem_frag_count = 0;
>> +
>> +	dev_dbg(mdev->dev, "Copied buffer to mbox, sz: %d",
>> +		mdev->mcore.bits.frag_size);
>> +
>> +	mdev->tx_sent = true;
>> +	QMP_MCORE_CH_VAR_TOGGLE(mdev, tx);
>> +	qmp_send_irq(mdev);
>> +	qmp_schedule_tx_timeout(mdev);
>> +	spin_unlock_irqrestore(&mdev->tx_lock, flags);
>> +
>> +	return 0;
>> +}
>> +
>> +/**
>> + * tmel_qmp_shutdown() - Disconnect this mailbox channel so the client does not
>> + *			 receive anymore data and can reliquish control
>> + *			 of the channel.
>> + * @chan:		 mailbox channel to be shutdown.
>> + */
>> +static void tmel_qmp_shutdown(struct mbox_chan *chan)
>> +{
>> +	struct qmp_device *mdev = chan->con_priv;
>> +
>> +	mutex_lock(&mdev->state_lock);
>> +	if (mdev->local_state != LINK_DISCONNECTED) {
>> +		mdev->local_state = LOCAL_DISCONNECTING;
>> +		QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
>> +		qmp_send_irq(mdev);
>> +	}
>> +	mutex_unlock(&mdev->state_lock);
>> +}
>> +
>> +static void tmel_receive_message(void *message)
>> +{
>> +	struct tmel *tdev = tmeldev;
>> +	struct kvec *pkt = NULL;
>> +
>> +	if (!message) {
>> +		pr_err("spurious message received\n");
> 
> s/pr_/dev_/ all over the place.
> 
ok.

>> +		goto tmel_receive_end;
>> +	}
>> +
>> +	if (tdev->rx_done) {
>> +		pr_err("tmel response pending\n");
>> +		goto tmel_receive_end;
>> +	}
>> +
>> +	pkt = (struct kvec *)message;
>> +	tdev->pkt.iov_len = pkt->iov_len;
>> +	tdev->pkt.iov_base = pkt->iov_base;
>> +	tdev->rx_done = true;
>> +
>> +tmel_receive_end:
>> +	wake_up_interruptible(&tdev->waitq);
>> +}
>> +
>> +/**
>> + * qmp_recv_data() -	received notification that data is available in the
>> + *			mailbox. Copy data from mailbox and pass to client.
>> + * @mbox:		mailbox device that received the notification.
>> + * @mbox_of:		offset of mailbox after QMP Control data.
>> + */
>> +static void qmp_recv_data(struct qmp_device *mdev, u32 mbox_of)
>> +{
>> +	void __iomem *addr;
>> +	struct kvec *pkt;
>> +
>> +	addr = mdev->ucore_desc + mbox_of;
>> +	pkt = &mdev->rx_pkt;
>> +	pkt->iov_len = mdev->ucore.bits.frag_size;
>> +
>> +	memcpy_fromio(pkt->iov_base, addr, pkt->iov_len);
>> +	QMP_MCORE_CH_ACK_UPDATE(mdev, tx);
>> +	dev_dbg(mdev->dev, "%s: Send RX data to TMEL Client", __func__);
>> +	tmel_receive_message(pkt);
>> +
>> +	QMP_MCORE_CH_VAR_TOGGLE(mdev, rx_done);
>> +	qmp_send_irq(mdev);
>> +}
>> +
>> +/**
>> + * clr_mcore_ch_state() - Clear the mcore state of a mailbox.
>> + * @mdev:	mailbox device to be initialized.
>> + */
>> +static void clr_mcore_ch_state(struct qmp_device *mdev)
>> +{
>> +	QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
>> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, ch_state);
>> +
>> +	QMP_MCORE_CH_VAR_CLR(mdev, tx);
>> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, tx);
>> +
>> +	QMP_MCORE_CH_VAR_CLR(mdev, rx_done);
>> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, rx_done);
>> +
>> +	QMP_MCORE_CH_VAR_CLR(mdev, read_int);
>> +	QMP_MCORE_CH_VAR_ACK_CLR(mdev, read_int);
>> +
>> +	mdev->mcore.bits.frag_size = 0;
>> +	mdev->mcore.bits.rem_frag_count = 0;
>> +}
>> +
>> +/**
>> + * qmp_rx() - Handle incoming messages from remote processor.
>> + * @mbox:	mailbox device that received notification.
>> + */
>> +static void qmp_rx(struct qmp_device *mdev)
>> +{
>> +	unsigned long flags;
>> +
>> +	/* read remote_desc from mailbox register */
>> +	mdev->ucore.val = ioread32(mdev->ucore_desc);
>> +
>> +	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
>> +		mdev->mcore.val, mdev->ucore.val);
>> +
>> +	mutex_lock(&mdev->state_lock);
>> +
>> +	/* Check if remote link down */
>> +	if (mdev->local_state >= LINK_CONNECTED &&
>> +	    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
>> +		mdev->local_state = LINK_NEGOTIATION;
>> +		QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
>> +		qmp_send_irq(mdev);
>> +		mutex_unlock(&mdev->state_lock);
>> +		return;
>> +	}
>> +
>> +	switch (mdev->local_state) {
>> +	case LINK_DISCONNECTED:
>> +		QMP_MCORE_CH_VAR_SET(mdev, link_state);
>> +		mdev->local_state = LINK_NEGOTIATION;
>> +		mdev->rx_pkt.iov_base = kzalloc(QMP_MAX_PKT_SIZE,
>> +						GFP_KERNEL);
> 
> Move to probe, use devm.
> 
ok.

>> +
>> +		if (!mdev->rx_pkt.iov_base) {
>> +			dev_err(mdev->dev, "rx pkt alloc failed");
>> +			break;
>> +		}
>> +		dev_dbg(mdev->dev, "Set to link negotiation");
>> +		qmp_send_irq(mdev);
>> +
>> +		break;
>> +	case LINK_NEGOTIATION:
>> +		if (!QMP_MCORE_CH_VAR_GET(mdev, link_state) ||
>> +		    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
>> +			dev_err(mdev->dev, "rx irq:link down state\n");
>> +			break;
>> +		}
>> +
>> +		clr_mcore_ch_state(mdev);
>> +		QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
>> +		mdev->local_state = LINK_CONNECTED;
>> +		complete_all(&mdev->link_complete);
>> +		dev_dbg(mdev->dev, "Set to link connected");
>> +
>> +		break;
>> +	case LINK_CONNECTED:
>> +		/* No need to handle until local opens */
>> +		break;
>> +	case LOCAL_CONNECTING:
>> +		/* Ack to remote ch_state change */
>> +		QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
>> +
>> +		mdev->local_state = CHANNEL_CONNECTED;
>> +		complete_all(&mdev->ch_complete);
>> +		dev_dbg(mdev->dev, "Set to channel connected");
>> +		qmp_send_irq(mdev);
>> +		break;
>> +	case CHANNEL_CONNECTED:
>> +		/* Check for remote channel down */
>> +		if (!QMP_UCORE_CH_VAR_GET(mdev, ch_state)) {
>> +			mdev->local_state = LOCAL_CONNECTING;
>> +			QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
>> +			dev_dbg(mdev->dev, "Remote Disconnect");
>> +			qmp_send_irq(mdev);
>> +		}
>> +
>> +		spin_lock_irqsave(&mdev->tx_lock, flags);
>> +		/* Check TX done */
>> +		if (mdev->tx_sent &&
>> +		    QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, rx_done)) {
>> +			/* Ack to remote */
>> +			QMP_MCORE_CH_ACK_UPDATE(mdev, rx_done);
>> +			mdev->tx_sent = false;
>> +			cancel_delayed_work(&mdev->dwork);
>> +			dev_dbg(mdev->dev, "TX flag cleared");
>> +		}
>> +		spin_unlock_irqrestore(&mdev->tx_lock, flags);
>> +
>> +		/* Check if remote is Transmitting */
>> +		if (!QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, tx))
>> +			break;
>> +		if (mdev->ucore.bits.frag_size == 0 ||
>> +		    mdev->ucore.bits.frag_size > QMP_MAX_PKT_SIZE) {
>> +			dev_err(mdev->dev, "Rx frag size error %d\n",
>> +				mdev->ucore.bits.frag_size);
>> +			break;
>> +		}
>> +
>> +		qmp_recv_data(mdev, QMP_CTRL_DATA_SIZE);
>> +		break;
>> +	case LOCAL_DISCONNECTING:
>> +		if (!QMP_MCORE_CH_VAR_GET(mdev, ch_state)) {
>> +			clr_mcore_ch_state(mdev);
>> +			mdev->local_state = LINK_CONNECTED;
>> +			dev_dbg(mdev->dev, "Channel closed");
>> +			reinit_completion(&mdev->ch_complete);
>> +		}
>> +
>> +		break;
>> +	default:
>> +		dev_err(mdev->dev, "Local Channel State corrupted\n");
>> +	}
>> +	mutex_unlock(&mdev->state_lock);
>> +}
>> +
>> +static irqreturn_t qmp_irq_handler(int irq, void *priv)
>> +{
>> +	struct qmp_device *mdev = (struct qmp_device *)priv;
>> +
>> +	qmp_rx(mdev);
>> +
>> +	return IRQ_HANDLED;
>> +}
>> +
>> +static int tmel_qmp_parse_devicetree(struct platform_device *pdev,
>> +				     struct qmp_device *mdev)
> 
> inline.
> 
ok.

>> +{
>> +	struct device *dev = &pdev->dev;
>> +
>> +	mdev->mcore_desc = devm_platform_ioremap_resource(pdev, 0);
>> +	if (!mdev->mcore_desc) {
> 
> Incorrect
> 
ok.

>> +		dev_err(dev, "ioremap failed for mcore reg\n");
>> +		return -EIO;
>> +	}
>> +
>> +	mdev->ucore_desc = mdev->mcore_desc + QMP_UCORE_DESC_OFFSET;
>> +
>> +	mdev->mbox_client.dev = dev;
>> +	mdev->mbox_client.knows_txdone = false;
>> +	mdev->mbox_chan = mbox_request_channel(&mdev->mbox_client, 0);
>> +	if (IS_ERR(mdev->mbox_chan)) {
>> +		dev_err(dev, "mbox chan for IPC is missing\n");
>> +		return PTR_ERR(mdev->mbox_chan);
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +static void tmel_qmp_remove(struct platform_device *pdev)
>> +{
>> +	struct qmp_device *mdev = platform_get_drvdata(pdev);
>> +
>> +	mbox_controller_unregister(&mdev->ctrl);
>> +	kfree(mdev->rx_pkt.iov_base);
>> +}
>> +
>> +static struct device *tmel_get_device(void)
>> +{
>> +	struct tmel *tdev = tmeldev;
>> +
>> +	if (!tdev)
>> +		return NULL;
>> +
>> +	return tdev->dev;
>> +}
>> +
>> +static int tmel_prepare_msg(struct tmel *tdev, u32 msg_uid,
>> +			    void *msg_buf, size_t msg_size)
>> +{
>> +	struct tmel_ipc_pkt *ipc_pkt = tdev->ipc_pkt;
>> +	struct ipc_header *msg_hdr = &ipc_pkt->msg_hdr;
>> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
>> +	struct sram_payload *sram_payload = &ipc_pkt->payload.sram_payload;
>> +	int ret;
>> +
>> +	memset(ipc_pkt, 0, sizeof(struct tmel_ipc_pkt));
>> +
>> +	msg_hdr->msg_type = TMEL_MSG_UID_MSG_TYPE(msg_uid);
>> +	msg_hdr->action_id = TMEL_MSG_UID_ACTION_ID(msg_uid);
>> +
>> +	pr_debug("uid: %d, msg_size: %zu msg_type:%d, action_id:%d\n",
>> +		 msg_uid, msg_size, msg_hdr->msg_type, msg_hdr->action_id);
>> +
>> +	if (sizeof(struct ipc_header) + msg_size <= MBOX_IPC_PACKET_SIZE) {
>> +		/* Mbox only */
>> +		msg_hdr->ipc_type = IPC_MBOX_ONLY;
>> +		msg_hdr->msg_len = msg_size;
>> +		memcpy((void *)mbox_payload, msg_buf, msg_size);
>> +	} else if (msg_size <= SRAM_IPC_MAX_BUF_SIZE) {
>> +		/* SRAM */
>> +		msg_hdr->ipc_type = IPC_MBOX_SRAM;
>> +		msg_hdr->msg_len = 8;
>> +
>> +		tdev->sram_dma_addr = dma_map_single(tdev->dev, msg_buf,
>> +						     msg_size,
>> +						     DMA_BIDIRECTIONAL);
>> +		ret = dma_mapping_error(tdev->dev, tdev->sram_dma_addr);
>> +		if (ret != 0) {
>> +			pr_err("SRAM DMA mapping error: %d\n", ret);
>> +			return ret;
>> +		}
>> +
>> +		sram_payload->payload_ptr = tdev->sram_dma_addr;
>> +		sram_payload->payload_len = msg_size;
>> +	} else {
>> +		pr_err("Invalid payload length: %zu\n", msg_size);
> 
> Return error?
> 
ok.

>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +static void tmel_unprepare_message(struct tmel *tdev,
>> +				   void *msg_buf, size_t msg_size)
>> +{
>> +	struct tmel_ipc_pkt *ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
>> +	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
>> +
>> +	if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_ONLY) {
>> +		memcpy(msg_buf, (void *)mbox_payload, msg_size);
>> +	} else if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_SRAM) {
>> +		dma_unmap_single(tdev->dev, tdev->sram_dma_addr, msg_size,
>> +				 DMA_BIDIRECTIONAL);
>> +		tdev->sram_dma_addr = 0;
>> +	}
>> +}
>> +
>> +static bool tmel_rx_done(struct tmel *tdev)
>> +{
>> +	return tdev->rx_done;
>> +}
>> +
>> +static int tmel_process_request(u32 msg_uid, void *msg_buf,
>> +				size_t msg_size)
>> +{
>> +	struct tmel *tdev = tmeldev;
>> +	unsigned long jiffies;
>> +	struct tmel_ipc_pkt *resp_ipc_pkt;
>> +	long time_left = 0;
>> +	int ret = 0;
>> +
>> +	/*
>> +	 * Check to handle if probe is not successful or not completed yet
>> +	 */
>> +	if (!tdev) {
>> +		pr_err("tmel dev is NULL\n");
>> +		return -ENODEV;
>> +	}
>> +
>> +	if (!msg_buf || !msg_size) {
>> +		pr_err("Invalid msg_buf or msg_size\n");
>> +		return -EINVAL;
>> +	}
>> +
>> +	mutex_lock(&tdev->lock);
>> +	tdev->rx_done = false;
>> +
>> +	ret = tmel_prepare_msg(tdev, msg_uid, msg_buf, msg_size);
>> +	if (ret)
>> +		return ret;
>> +
>> +	tdev->pkt.iov_len = sizeof(struct tmel_ipc_pkt);
>> +	tdev->pkt.iov_base = (void *)tdev->ipc_pkt;
>> +
>> +	qmp_send_data(tdev->mdev, &tdev->pkt);
>> +	jiffies = msecs_to_jiffies(30000);
>> +
>> +	time_left = wait_event_interruptible_timeout(tdev->waitq,
>> +						     tmel_rx_done(tdev),
>> +						     jiffies);
>> +
>> +	if (!time_left) {
>> +		pr_err("Request timed out\n");
>> +		ret = -ETIMEDOUT;
>> +		goto err_exit;
>> +	}
>> +
>> +	if (tdev->pkt.iov_len != sizeof(struct tmel_ipc_pkt)) {
>> +		pr_err("Invalid pkt.size received size: %lu, expected: %zu\n",
>> +		       tdev->pkt.iov_len, sizeof(struct tmel_ipc_pkt));
>> +		ret = -EPROTO;
>> +		goto err_exit;
>> +	}
>> +
>> +	resp_ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
>> +	tmel_unprepare_message(tdev, msg_buf, msg_size);
>> +	tdev->rx_done = false;
>> +	ret = resp_ipc_pkt->msg_hdr.response;
>> +
>> +err_exit:
>> +	mutex_unlock(&tdev->lock);
>> +	return ret;
>> +}
>> +
>> +static int tmel_secboot_sec_auth(u32 sw_id, void *metadata, size_t size)
>> +{
>> +	struct device *dev = tmel_get_device();
>> +	struct tmel_secboot_sec_auth *msg;
>> +	dma_addr_t elf_buf_phys;
>> +	void *elf_buf;
>> +	int ret;
>> +
>> +	if (!dev || !metadata)
>> +		return -EINVAL;
>> +
>> +	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
>> +
>> +	elf_buf = dma_alloc_coherent(dev, size, &elf_buf_phys, GFP_KERNEL);
>> +	if (!elf_buf)
>> +		return -ENOMEM;
>> +
>> +	memcpy(elf_buf, metadata, size);
>> +
>> +	msg->req.sw_id = sw_id;
>> +	msg->req.elf_buf.buf = (u32)elf_buf_phys;
>> +	msg->req.elf_buf.buf_len = (u32)size;
>> +
>> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SEC_AUTH, msg,
>> +				   sizeof(struct tmel_secboot_sec_auth));
>> +	if (ret) {
>> +		pr_err("Failed to send IPC: %d\n", ret);
>> +	} else if (msg->resp.status || msg->resp.extended_error) {
>> +		pr_err("Failed with status: %d error: %d\n",
>> +		       msg->resp.status, msg->resp.extended_error);
>> +		ret = msg->resp.status;
>> +	}
>> +
>> +	kfree(msg);
>> +	dma_free_coherent(dev, size, elf_buf, elf_buf_phys);
>> +
>> +	return ret;
>> +}
>> +
>> +static int tmel_secboot_teardown(u32 sw_id, u32 secondary_sw_id)
>> +{
>> +	struct device *dev = tmel_get_device();
>> +	struct tmel_secboot_teardown msg = {0};
>> +	int ret;
>> +
>> +	if (!dev)
>> +		return -EINVAL;
>> +
>> +	msg.req.sw_id = sw_id;
>> +	msg.req.secondary_sw_id = secondary_sw_id;
>> +	msg.resp.status = TMEL_ERROR_GENERIC;
>> +
>> +	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN, &msg,
>> +				   sizeof(msg));
>> +	if (ret) {
>> +		pr_err("Failed to send IPC: %d\n", ret);
>> +	} else if (msg.resp.status) {
>> +		pr_err("Failed with status: %d\n", msg.resp.status);
>> +		ret = msg.resp.status;
>> +	}
>> +
>> +	return ret;
>> +}
>> +
>> +static int tmel_init(struct qmp_device *mdev)
>> +{
>> +	struct tmel *tdev;
>> +
>> +	tdev = devm_kzalloc(mdev->dev, sizeof(*tdev), GFP_KERNEL);
>> +	if (!tdev)
>> +		return -ENOMEM;
>> +
>> +	mutex_init(&tdev->lock);
>> +
>> +	tdev->ipc_pkt = devm_kzalloc(mdev->dev, sizeof(struct tmel_ipc_pkt),
>> +				     GFP_KERNEL);
>> +	if (!tdev->ipc_pkt)
>> +		return -ENOMEM;
>> +
>> +	init_waitqueue_head(&tdev->waitq);
>> +
>> +	tdev->rx_done = false;
>> +	tdev->dev = mdev->dev;
>> +
>> +	tmeldev = tdev;
>> +	tmeldev->mdev = mdev;
>> +
>> +	return 0;
>> +}
>> +
>> +static int tmel_qmp_send(struct mbox_chan *chan, void *data)
>> +{
>> +	struct qmp_device *mdev = chan->con_priv;
>> +
>> +	mdev->qwork.data =  data;
>> +
>> +	queue_work(system_wq, &mdev->qwork.work);
>> +
>> +	return 0;
>> +}
>> +
>> +static void tmel_qmp_send_work(struct work_struct *work)
>> +{
>> +	struct qmp_work *qwork = container_of(work, struct qmp_work, work);
>> +	struct qmp_device *mdev = tmeldev->mdev;
>> +	struct mbox_chan *chan = &mdev->ctrl.chans[0];
>> +
>> +	struct tmel_qmp_msg *tmsg = qwork->data;
>> +	struct tmel_sec_auth *smsg = tmsg->msg;
>> +	int ret;
>> +
>> +	switch (tmsg->msg_id) {
>> +	case TMEL_MSG_UID_SECBOOT_SEC_AUTH:
>> +		ret = tmel_secboot_sec_auth(smsg->pas_id,
>> +					    smsg->data,
>> +					    smsg->size);
>> +		break;
>> +	case TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN:
>> +		ret = tmel_secboot_teardown(smsg->pas_id, 0);
>> +		break;
>> +	}
>> +
>> +	mbox_chan_txdone(chan, 0);
>> +}
>> +
>> +/**
>> + * tmel_qmp_mbox_of_xlate() - Returns a mailbox channel to be used for this mailbox
>> + *		      device. Make sure the channel is not already in use.
>> + * @mbox:       Mailbox device controlls the requested channel.
>> + * @spec:       Device tree arguments to specify which channel is requested.
>> + */
>> +static struct mbox_chan *tmel_qmp_mbox_of_xlate(struct mbox_controller *mbox,
>> +						const struct of_phandle_args *spec)
>> +{
>> +	struct qmp_device *mdev = dev_get_drvdata(mbox->dev);
>> +	unsigned int channel = spec->args[0];
>> +
>> +	if (!mdev)
>> +		return ERR_PTR(-EPROBE_DEFER);
>> +
>> +	if (channel >= mbox->num_chans)
>> +		return ERR_PTR(-EINVAL);
>> +
>> +	mutex_lock(&mdev->state_lock);
>> +	if (mdev->ch_in_use) {
>> +		dev_err(mdev->dev, "mbox channel already in use\n");
>> +		mutex_unlock(&mdev->state_lock);
>> +		return ERR_PTR(-EBUSY);
>> +	}
>> +	mdev->ch_in_use = true;
>> +	mutex_unlock(&mdev->state_lock);
>> +
>> +	return &mbox->chans[0];
>> +}
>> +
>> +static struct mbox_chan_ops tmel_qmp_ops = {
>> +	.startup = tmel_qmp_startup,
>> +	.shutdown = tmel_qmp_shutdown,
>> +	.send_data = tmel_qmp_send,
>> +};
>> +
>> +static int tmel_qmp_probe(struct platform_device *pdev)
>> +{
>> +	struct device_node *node = pdev->dev.of_node;
>> +	struct mbox_chan *chans;
>> +	struct qmp_device *mdev;
>> +	int ret = 0;
>> +
>> +	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
>> +	if (!mdev)
>> +		return -ENOMEM;
>> +
>> +	platform_set_drvdata(pdev, mdev);
>> +
>> +	ret = tmel_qmp_parse_devicetree(pdev, mdev);
>> +	if (ret)
>> +		return ret;
>> +
>> +	mdev->dev = &pdev->dev;
>> +
>> +	chans = devm_kzalloc(mdev->dev,
>> +			     sizeof(*chans) * QMP_NUM_CHANS, GFP_KERNEL);
>> +	if (!chans)
>> +		return -ENOMEM;
>> +
>> +	INIT_WORK(&mdev->qwork.work, tmel_qmp_send_work);
>> +
>> +	mdev->ctrl.dev = &pdev->dev;
>> +	mdev->ctrl.ops = &tmel_qmp_ops;
>> +	mdev->ctrl.chans = chans;
>> +	chans[0].con_priv = mdev;
>> +	mdev->ctrl.num_chans = QMP_NUM_CHANS;
>> +	mdev->ctrl.txdone_irq = true;
>> +	mdev->ctrl.of_xlate = tmel_qmp_mbox_of_xlate;
>> +
>> +	ret = mbox_controller_register(&mdev->ctrl);
> 
> devm_
> 
ok.

>> +	if (ret) {
>> +		dev_err(mdev->dev, "failed to register mbox controller\n");
>> +		return ret;
>> +	}
>> +
>> +	spin_lock_init(&mdev->tx_lock);
>> +	mutex_init(&mdev->state_lock);
>> +	mdev->local_state = LINK_DISCONNECTED;
>> +	init_completion(&mdev->link_complete);
>> +	init_completion(&mdev->ch_complete);
> 
> Oh, nice. So mbox is already there, but the structure is not
> initialized.
> 
Hmm, will fix and move before.

>> +
>> +	INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout);
>> +
>> +	ret = platform_get_irq(pdev, 0);
>> +
>> +	ret = devm_request_threaded_irq(mdev->dev, ret,
>> +					NULL, qmp_irq_handler,
>> +					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
>> +					node->name, (void *)mdev);
> 
> drop type conversion.
> 
> Why is IRQ registered after mbox?
> 
will fix and move before.

>> +	if (ret < 0) {
>> +		dev_err(mdev->dev, "request threaded irq failed, ret %d\n",
>> +			ret);
>> +
>> +		tmel_qmp_remove(pdev);
>> +		return ret;
>> +	}
>> +
>> +	/* Receive any outstanding initial data */
>> +	tmel_init(mdev);
>> +	qmp_rx(mdev);
> 
> You've already registered a mailbox. There can be a message traffic already.
> 
Hmm, in fact this early call is made once to kickstart the state machine
transition from the host side. So this handler should not have been
used. Will move that part alone to a start function and remove this.

>> +
>> +	return 0;
>> +}
>> +
>> +static const struct of_device_id tmel_qmp_dt_match[] = {
>> +	{ .compatible = "qcom,ipq5424-tmel-qmp" },
>> +	{},
>> +};
>> +
>> +static struct platform_driver tmel_qmp_driver = {
>> +	.driver = {
>> +		.name = "tmel_qmp_mbox",
>> +		.of_match_table = tmel_qmp_dt_match,
>> +	},
>> +	.probe = tmel_qmp_probe,
>> +	.remove = tmel_qmp_remove,
>> +};
>> +module_platform_driver(tmel_qmp_driver);
>> +
>> +MODULE_DESCRIPTION("QCOM TMEL QMP DRIVER");
> 
> driver
> 
ok.

>> +MODULE_LICENSE("GPL");
>> diff --git a/include/linux/mailbox/tmelcom-qmp.h b/include/linux/mailbox/tmelcom-qmp.h
>> new file mode 100644
>> index 000000000000..9fa450eaf736
>> --- /dev/null
>> +++ b/include/linux/mailbox/tmelcom-qmp.h
>> @@ -0,0 +1,157 @@
>> +/* SPDX-License-Identifier: GPL-2.0-only */
>> +/*
>> + * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
>> + */
>> +#ifndef _TMELCOM_H_
>> +#define _TMELCOM_H_
>> +
>> +/*----------------------------------------------------------------------------
>> + * Documentation
>> + * --------------------------------------------------------------------------
>> + */
>> +
>> +/*
>> + * TMEL Messages Unique Identifiers bit layout
>> +    _____________________________________
>> +   |	   |	    |	   |
>> +   | 31------16| 15-------8 | 7-------0 |
>> +   | Reserved  |messageType | actionID  |
>> +   |___________|____________|___________|
>> +	       \___________  ___________/
>> +			   \/
>> +		      TMEL_MSG_UID
>> +*/
> 
> #define instead of drawing pictures. Think about people using Braille
> terminals.
> 
ok.

>> +
>> +/*
>> + * TMEL Messages Unique Identifiers Parameter ID bit layout
>> +_________________________________________________________________________________________
>> +|     |     |     |     |     |     |     |     |     |     |     |    |    |    |       |
>> +|31-30|29-28|27-26|25-24|23-22|21-20|19-18|17-16|15-14|13-12|11-10|9--8|7--6|5--4|3-----0|
>> +| p14 | p13 | p12 | p11 | p10 | p9  | p8  | p7  | p6  | p5  | p4  | p3 | p2 | p1 | nargs |
>> +|type |type |type |type |type |type |type |type |type |type |type |type|type|type|       |
>> +|_____|_____|_____|_____|_____|_____|_____|_____|_____|_____|_____|____|____|____|_______|
> 
> Totally unreadable and not helping. What is nargs? What kind of types
> are those?
> 
In fact, the additional arguments and their types are not required for
the current services. Will remove them.

>> +
>> +*/
>> +
>> +/*
>> + * Macro used to define unique TMEL Message Identifier based on
>> + * message type and action identifier.
>> + */
>> +#define TMEL_MSG_UID_CREATE(m, a)	((u32)(((m & 0xff) << 8) | (a & 0xff)))
> 
> What is m and a? Please use sensible names in the API.
> 
Message type and Action id. Will expand.

>> +
>> +/** Helper macro to extract the messageType from TMEL_MSG_UID. */
>> +#define TMEL_MSG_UID_MSG_TYPE(v)	((v & GENMASK(15, 8)) >> 8)
> 
> #define MASK
> use FIELD_PREP, FIELD_GET
> 
ok.

>> +
>> +/** Helper macro to extract the actionID from TMEL_MSG_UID. */
>> +#define TMEL_MSG_UID_ACTION_ID(v)	(v & GENMASK(7, 0))
>> +
>> +/****************************************************************************
>> + *
>> + * All definitions of supported messageType's.
>> + *
>> + * 0x00 -> 0xF0 messageType used for production use cases.
>> + * 0xF1 -> 0xFF messageType reserved(can be used for test puprposes).
> 
> Which production use cases? Reerved by whom? Who can use those?
> 
It's a recommendation coming from FW, and the range is reserved for FW
test purposes. Beyond that, there is not much info; maybe we will remove
it to avoid confusion, because none of the service message types use
those reserved values anyway.

Regards,
  Sricharan
diff mbox series

Patch

diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 8ecba7fb999e..8ad0b834d617 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -306,4 +306,11 @@  config THEAD_TH1520_MBOX
 	  kernel is running, and E902 core used for power management among other
 	  things.
 
+config QCOM_TMEL_QMP_MAILBOX
+	tristate "QCOM Mailbox Protocol(QMP) for TME-L SS"
+	help
+	  Say yes to add support for the QMP Mailbox Protocol driver for TME-L.
+	  QMP is a lightweight communication protocol for sending messages to
+	  TME-L. This protocol fits into the Generic Mailbox Framework.
+	  QMP uses mailbox registers.
 endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 5f4f5b0ce2cc..4dba283a94ad 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -66,3 +66,5 @@  obj-$(CONFIG_QCOM_CPUCP_MBOX)	+= qcom-cpucp-mbox.o
 obj-$(CONFIG_QCOM_IPCC)		+= qcom-ipcc.o
 
 obj-$(CONFIG_THEAD_TH1520_MBOX)	+= mailbox-th1520.o
+
+obj-$(CONFIG_QCOM_TMEL_QMP_MAILBOX) += qcom-tmel-qmp.o
diff --git a/drivers/mailbox/qcom-tmel-qmp.c b/drivers/mailbox/qcom-tmel-qmp.c
new file mode 100644
index 000000000000..6de0a418e0ae
--- /dev/null
+++ b/drivers/mailbox/qcom-tmel-qmp.c
@@ -0,0 +1,971 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/tmelcom-qmp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/workqueue.h>
+
+#define QMP_NUM_CHANS	0x1
+#define QMP_TOUT_MS	1000
+#define MBOX_ALIGN_BYTES	3
+#define QMP_CTRL_DATA_SIZE	4
+#define QMP_MAX_PKT_SIZE	0x18
+#define QMP_UCORE_DESC_OFFSET	0x1000
+
+#define QMP_CH_VAR_GET(mdev, desc, var) ((mdev)->desc.bits.var)
+#define QMP_CH_VAR_SET(mdev, desc, var) (mdev)->desc.bits.var = 1
+#define QMP_CH_VAR_CLR(mdev, desc, var) (mdev)->desc.bits.var = 0
+
+#define QMP_MCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, mcore, var)
+#define QMP_MCORE_CH_VAR_SET(mdev, var)	QMP_CH_VAR_SET(mdev, mcore, var)
+#define QMP_MCORE_CH_VAR_CLR(mdev, var)	QMP_CH_VAR_CLR(mdev, mcore, var)
+
+#define QMP_MCORE_CH_VAR_TOGGLE(mdev, var) \
+	(mdev)->mcore.bits.var = !((mdev)->mcore.bits.var)
+#define QMP_MCORE_CH_ACKED_CHECK(mdev, var) \
+	((mdev)->ucore.bits.var == (mdev)->mcore.bits.var##_ack)
+#define QMP_MCORE_CH_ACK_UPDATE(mdev, var) \
+	(mdev)->mcore.bits.var##_ack = (mdev)->ucore.bits.var
+#define QMP_MCORE_CH_VAR_ACK_CLR(mdev, var) \
+	(mdev)->mcore.bits.var##_ack = 0
+
+#define QMP_UCORE_CH_VAR_GET(mdev, var)	QMP_CH_VAR_GET(mdev, ucore, var)
+#define QMP_UCORE_CH_ACKED_CHECK(mdev, var) \
+	((mdev)->mcore.bits.var == (mdev)->ucore.bits.var##_ack)
+#define QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, var) \
+	((mdev)->ucore.bits.var != (mdev)->mcore.bits.var##_ack)
+
+/**
+ * enum qmp_local_state -	definition of the local state machine
+ * @LINK_DISCONNECTED:		Init state, waiting for ucore to start
+ * @LINK_NEGOTIATION:		Set local link state to up, wait for ucore ack
+ * @LINK_CONNECTED:		Link state up, channel not connected
+ * @LOCAL_CONNECTING:		Channel opening locally, wait for ucore ack
+ * @CHANNEL_CONNECTED:		Channel fully opened
+ * @LOCAL_DISCONNECTING:	Channel closing locally, wait for ucore ack
+ */
+enum qmp_local_state {
+	LINK_DISCONNECTED,
+	LINK_NEGOTIATION,
+	LINK_CONNECTED,
+	LOCAL_CONNECTING,
+	CHANNEL_CONNECTED,
+	LOCAL_DISCONNECTING,
+};
+
+union channel_desc {
+	struct {
+		u32 link_state:1;
+		u32 link_state_ack:1;
+		u32 ch_state:1;
+		u32 ch_state_ack:1;
+		u32 tx:1;
+		u32 tx_ack:1;
+		u32 rx_done:1;
+		u32 rx_done_ack:1;
+		u32 read_int:1;
+		u32 read_int_ack:1;
+		u32 reserved:6;
+		u32 frag_size:8;
+		u32 rem_frag_count:8;
+	} bits;
+	unsigned int val;
+};
+
+struct qmp_work {
+	struct work_struct work;
+	void *data;
+};
+
+/**
+ * struct qmp_device - local information for managing a single mailbox
+ * @dev:	    The device that corresponds to this mailbox
+ * @ctrl:	    The mbox controller for this mailbox
+ * @mcore_desc:	    Local core (APSS) mailbox descriptor
+ * @ucore_desc:	    Remote core (TME-L) mailbox descriptor
+ * @mcore:	    Local core (APSS) channel descriptor
+ * @ucore:	    Remote core (TME-L) channel descriptor
+ * @rx_pkt:	    Buffer to pass to client, holds received data from mailbox
+ * @tx_pkt:	    Buffer from client, holds data to send on mailbox
+ * @mbox_client:    Mailbox client for the IPC interrupt
+ * @mbox_chan:	    Mailbox client chan for the IPC interrupt
+ * @local_state:    Current state of mailbox protocol
+ * @state_lock:	    Serialize mailbox state changes
+ * @tx_lock:	    Serialize access for writes to mailbox
+ * @link_complete:  Use to block until link negotiation with remote proc
+ * @ch_complete:    Use to block until the channel is fully opened
+ * @dwork:	    Delayed work to detect timed out tx
+ * @tx_sent:	    True if tx is sent and remote proc has not sent ack
+ */
+struct qmp_device {
+	struct device *dev;
+	struct mbox_controller ctrl;
+	struct qmp_work qwork;
+
+	void __iomem *mcore_desc;
+	void __iomem *ucore_desc;
+	union channel_desc mcore;
+	union channel_desc ucore;
+
+	struct kvec rx_pkt;
+	struct kvec tx_pkt;
+
+	struct mbox_client mbox_client;
+	struct mbox_chan *mbox_chan;
+
+	enum qmp_local_state local_state;
+
+	/* Lock for QMP link state changes */
+	struct mutex state_lock;
+	/* Lock to serialize access to mailbox */
+	spinlock_t tx_lock;
+
+	struct completion link_complete;
+	struct completion ch_complete;
+	struct delayed_work dwork;
+	void *data;
+
+	bool tx_sent;
+	bool ch_in_use;
+};
+
+struct tmel_msg_param_type_buf_in {
+	u32 buf;
+	u32 buf_len;
+};
+
+struct tmel_secboot_sec_auth_req {
+	u32 sw_id;
+	struct tmel_msg_param_type_buf_in elf_buf;
+	struct tmel_msg_param_type_buf_in region_list;
+	u32 relocate;
+} __packed;
+
+struct tmel_secboot_sec_auth_resp {
+	u32 first_seg_addr;
+	u32 first_seg_len;
+	u32 entry_addr;
+	u32 extended_error;
+	u32 status;
+} __packed;
+
+struct tmel_secboot_sec_auth {
+	struct tmel_secboot_sec_auth_req req;
+	struct tmel_secboot_sec_auth_resp resp;
+} __packed;
+
+struct tmel_secboot_teardown_req {
+	u32 sw_id;
+	u32 secondary_sw_id;
+} __packed;
+
+struct tmel_secboot_teardown_resp {
+	u32 status;
+} __packed;
+
+struct tmel_secboot_teardown {
+	struct tmel_secboot_teardown_req req;
+	struct tmel_secboot_teardown_resp resp;
+} __packed;
+
+struct tmel {
+	struct device *dev;
+	struct qmp_device *mdev;
+	struct kvec pkt;
+	/* To serialize incoming tmel request */
+	struct mutex lock;
+	struct tmel_ipc_pkt *ipc_pkt;
+	dma_addr_t sram_dma_addr;
+	wait_queue_head_t waitq;
+	bool rx_done;
+};
+
+static struct tmel *tmeldev;
+
+/**
+ * qmp_send_irq() - send an irq to a remote entity as an event signal.
+ * @mdev:       Which remote entity that should receive the irq.
+ */
+static void qmp_send_irq(struct qmp_device *mdev)
+{
+	/* Update the mcore val to mcore register */
+	iowrite32(mdev->mcore.val, mdev->mcore_desc);
+	/* Ensure desc update is visible before IPC */
+	wmb();
+
+	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
+		mdev->mcore.val, mdev->ucore.val);
+
+	mbox_send_message(mdev->mbox_chan, NULL);
+	mbox_client_txdone(mdev->mbox_chan, 0);
+}
+
+/**
+ * qmp_notify_timeout() - Notify client of tx timeout with -ETIME
+ * @work:		  Structure for work that was scheduled.
+ */
+static void qmp_notify_timeout(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct qmp_device *mdev = container_of(dwork, struct qmp_device, dwork);
+	struct mbox_chan *chan = &mdev->ctrl.chans[0];
+	int err = -ETIME;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdev->tx_lock, flags);
+	if (!mdev->tx_sent) {
+		spin_unlock_irqrestore(&mdev->tx_lock, flags);
+		return;
+	}
+	mdev->tx_sent = false;
+	spin_unlock_irqrestore(&mdev->tx_lock, flags);
+	dev_dbg(mdev->dev, "%s: TX timeout", __func__);
+	mbox_chan_txdone(chan, err);
+}
+
+static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev)
+{
+	schedule_delayed_work(&mdev->dwork, msecs_to_jiffies(QMP_TOUT_MS));
+}
+
+/**
+ * tmel_qmp_startup() - Start qmp mailbox channel for communication. Waits for
+ *		       remote subsystem to open channel if link is not
+ *		       initiated or until timeout.
+ * @chan:	       mailbox channel that is being opened.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tmel_qmp_startup(struct mbox_chan *chan)
+{
+	struct qmp_device *mdev = chan->con_priv;
+	int ret;
+
+	if (!mdev)
+		return -EINVAL;
+
+	ret = wait_for_completion_timeout(&mdev->link_complete,
+					  msecs_to_jiffies(QMP_TOUT_MS));
+	if (!ret)
+		return -EAGAIN;
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->local_state == LINK_CONNECTED) {
+		QMP_MCORE_CH_VAR_SET(mdev, ch_state);
+		mdev->local_state = LOCAL_CONNECTING;
+		dev_dbg(mdev->dev, "link complete, local connecting");
+		qmp_send_irq(mdev);
+	}
+	mutex_unlock(&mdev->state_lock);
+
+	ret = wait_for_completion_timeout(&mdev->ch_complete,
+					  msecs_to_jiffies(QMP_TOUT_MS));
+	if (!ret)
+		return -ETIME;
+
+	return 0;
+}
+
+/**
+ * qmp_send_data() - Copy the data to the channel's mailbox and notify
+ *		     remote subsystem of new data. This function will
+ *		     return an error if the previous message sent has
+ *		     not been read. Cannot Sleep.
+ * @chan:	mailbox channel that data is to be sent over.
+ * @data:	Data to be sent to remote processor, should be in the format of
+ *		a kvec.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int qmp_send_data(struct qmp_device *mdev, void *data)
+{
+	struct kvec *pkt = (struct kvec *)data;
+	void __iomem *addr;
+	unsigned long flags;
+
+	if (!mdev || !data || !completion_done(&mdev->ch_complete))
+		return -EINVAL;
+
+	if (pkt->iov_len > QMP_MAX_PKT_SIZE) {
+		dev_err(mdev->dev, "Unsupported packet size %lu\n", pkt->iov_len);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&mdev->tx_lock, flags);
+	if (mdev->tx_sent) {
+		spin_unlock_irqrestore(&mdev->tx_lock, flags);
+		return -EAGAIN;
+	}
+
+	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
+		mdev->mcore.val, mdev->ucore.val);
+
+	addr = mdev->mcore_desc + QMP_CTRL_DATA_SIZE;
+	memcpy_toio(addr, pkt->iov_base, pkt->iov_len);
+
+	mdev->mcore.bits.frag_size = pkt->iov_len;
+	mdev->mcore.bits.rem_frag_count = 0;
+
+	dev_dbg(mdev->dev, "Copied buffer to mbox, sz: %d",
+		mdev->mcore.bits.frag_size);
+
+	mdev->tx_sent = true;
+	QMP_MCORE_CH_VAR_TOGGLE(mdev, tx);
+	qmp_send_irq(mdev);
+	qmp_schedule_tx_timeout(mdev);
+	spin_unlock_irqrestore(&mdev->tx_lock, flags);
+
+	return 0;
+}
+
+/**
+ * tmel_qmp_shutdown() - Disconnect this mailbox channel so the client does not
+ *			 receive any more data and can relinquish control
+ *			 of the channel.
+ * @chan:		 mailbox channel to be shutdown.
+ */
+static void tmel_qmp_shutdown(struct mbox_chan *chan)
+{
+	struct qmp_device *mdev = chan->con_priv;
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->local_state != LINK_DISCONNECTED) {
+		mdev->local_state = LOCAL_DISCONNECTING;
+		QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
+		qmp_send_irq(mdev);
+	}
+	mutex_unlock(&mdev->state_lock);
+}
+
+static void tmel_receive_message(void *message)
+{
+	struct tmel *tdev = tmeldev;
+	struct kvec *pkt = NULL;
+
+	if (!message) {
+		pr_err("spurious message received\n");
+		goto tmel_receive_end;
+	}
+
+	if (tdev->rx_done) {
+		pr_err("tmel response pending\n");
+		goto tmel_receive_end;
+	}
+
+	pkt = (struct kvec *)message;
+	tdev->pkt.iov_len = pkt->iov_len;
+	tdev->pkt.iov_base = pkt->iov_base;
+	tdev->rx_done = true;
+
+tmel_receive_end:
+	wake_up_interruptible(&tdev->waitq);
+}
+
+/**
+ * qmp_recv_data() -	received notification that data is available in the
+ *			mailbox. Copy data from mailbox and pass to client.
+ * @mbox:		mailbox device that received the notification.
+ * @mbox_of:		offset of mailbox after QMP Control data.
+ */
+static void qmp_recv_data(struct qmp_device *mdev, u32 mbox_of)
+{
+	void __iomem *addr;
+	struct kvec *pkt;
+
+	addr = mdev->ucore_desc + mbox_of;
+	pkt = &mdev->rx_pkt;
+	pkt->iov_len = mdev->ucore.bits.frag_size;
+
+	memcpy_fromio(pkt->iov_base, addr, pkt->iov_len);
+	QMP_MCORE_CH_ACK_UPDATE(mdev, tx);
+	dev_dbg(mdev->dev, "%s: Send RX data to TMEL Client", __func__);
+	tmel_receive_message(pkt);
+
+	QMP_MCORE_CH_VAR_TOGGLE(mdev, rx_done);
+	qmp_send_irq(mdev);
+}
+
+/**
+ * clr_mcore_ch_state() - Clear the mcore state of a mailbox.
+ * @mdev:	mailbox device to be initialized.
+ */
+static void clr_mcore_ch_state(struct qmp_device *mdev)
+{
+	QMP_MCORE_CH_VAR_CLR(mdev, ch_state);
+	QMP_MCORE_CH_VAR_ACK_CLR(mdev, ch_state);
+
+	QMP_MCORE_CH_VAR_CLR(mdev, tx);
+	QMP_MCORE_CH_VAR_ACK_CLR(mdev, tx);
+
+	QMP_MCORE_CH_VAR_CLR(mdev, rx_done);
+	QMP_MCORE_CH_VAR_ACK_CLR(mdev, rx_done);
+
+	QMP_MCORE_CH_VAR_CLR(mdev, read_int);
+	QMP_MCORE_CH_VAR_ACK_CLR(mdev, read_int);
+
+	mdev->mcore.bits.frag_size = 0;
+	mdev->mcore.bits.rem_frag_count = 0;
+}
+
+/**
+ * qmp_rx() - Handle incoming messages from remote processor.
+ * @mbox:	mailbox device that received notification.
+ */
+static void qmp_rx(struct qmp_device *mdev)
+{
+	unsigned long flags;
+
+	/* read remote_desc from mailbox register */
+	mdev->ucore.val = ioread32(mdev->ucore_desc);
+
+	dev_dbg(mdev->dev, "%s: mcore 0x%x ucore 0x%x", __func__,
+		mdev->mcore.val, mdev->ucore.val);
+
+	mutex_lock(&mdev->state_lock);
+
+	/* Check if remote link down */
+	if (mdev->local_state >= LINK_CONNECTED &&
+	    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
+		mdev->local_state = LINK_NEGOTIATION;
+		QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
+		qmp_send_irq(mdev);
+		mutex_unlock(&mdev->state_lock);
+		return;
+	}
+
+	switch (mdev->local_state) {
+	case LINK_DISCONNECTED:
+		QMP_MCORE_CH_VAR_SET(mdev, link_state);
+		mdev->local_state = LINK_NEGOTIATION;
+		mdev->rx_pkt.iov_base = kzalloc(QMP_MAX_PKT_SIZE,
+						GFP_KERNEL);
+
+		if (!mdev->rx_pkt.iov_base) {
+			dev_err(mdev->dev, "rx pkt alloc failed");
+			break;
+		}
+		dev_dbg(mdev->dev, "Set to link negotiation");
+		qmp_send_irq(mdev);
+
+		break;
+	case LINK_NEGOTIATION:
+		if (!QMP_MCORE_CH_VAR_GET(mdev, link_state) ||
+		    !QMP_UCORE_CH_VAR_GET(mdev, link_state)) {
+			dev_err(mdev->dev, "rx irq:link down state\n");
+			break;
+		}
+
+		clr_mcore_ch_state(mdev);
+		QMP_MCORE_CH_ACK_UPDATE(mdev, link_state);
+		mdev->local_state = LINK_CONNECTED;
+		complete_all(&mdev->link_complete);
+		dev_dbg(mdev->dev, "Set to link connected");
+
+		break;
+	case LINK_CONNECTED:
+		/* No need to handle until local opens */
+		break;
+	case LOCAL_CONNECTING:
+		/* Ack to remote ch_state change */
+		QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
+
+		mdev->local_state = CHANNEL_CONNECTED;
+		complete_all(&mdev->ch_complete);
+		dev_dbg(mdev->dev, "Set to channel connected");
+		qmp_send_irq(mdev);
+		break;
+	case CHANNEL_CONNECTED:
+		/* Check for remote channel down */
+		if (!QMP_UCORE_CH_VAR_GET(mdev, ch_state)) {
+			mdev->local_state = LOCAL_CONNECTING;
+			QMP_MCORE_CH_ACK_UPDATE(mdev, ch_state);
+			dev_dbg(mdev->dev, "Remote Disconnect");
+			qmp_send_irq(mdev);
+		}
+
+		spin_lock_irqsave(&mdev->tx_lock, flags);
+		/* Check TX done */
+		if (mdev->tx_sent &&
+		    QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, rx_done)) {
+			/* Ack to remote */
+			QMP_MCORE_CH_ACK_UPDATE(mdev, rx_done);
+			mdev->tx_sent = false;
+			cancel_delayed_work(&mdev->dwork);
+			dev_dbg(mdev->dev, "TX flag cleared");
+		}
+		spin_unlock_irqrestore(&mdev->tx_lock, flags);
+
+		/* Check if remote is Transmitting */
+		if (!QMP_UCORE_CH_VAR_TOGGLED_CHECK(mdev, tx))
+			break;
+		if (mdev->ucore.bits.frag_size == 0 ||
+		    mdev->ucore.bits.frag_size > QMP_MAX_PKT_SIZE) {
+			dev_err(mdev->dev, "Rx frag size error %d\n",
+				mdev->ucore.bits.frag_size);
+			break;
+		}
+
+		qmp_recv_data(mdev, QMP_CTRL_DATA_SIZE);
+		break;
+	case LOCAL_DISCONNECTING:
+		if (!QMP_MCORE_CH_VAR_GET(mdev, ch_state)) {
+			clr_mcore_ch_state(mdev);
+			mdev->local_state = LINK_CONNECTED;
+			dev_dbg(mdev->dev, "Channel closed");
+			reinit_completion(&mdev->ch_complete);
+		}
+
+		break;
+	default:
+		dev_err(mdev->dev, "Local Channel State corrupted\n");
+	}
+	mutex_unlock(&mdev->state_lock);
+}
+
+static irqreturn_t qmp_irq_handler(int irq, void *priv)
+{
+	struct qmp_device *mdev = (struct qmp_device *)priv;
+
+	qmp_rx(mdev);
+
+	return IRQ_HANDLED;
+}
+
+static int tmel_qmp_parse_devicetree(struct platform_device *pdev,
+				     struct qmp_device *mdev)
+{
+	struct device *dev = &pdev->dev;
+
+	mdev->mcore_desc = devm_platform_ioremap_resource(pdev, 0);
+	if (!mdev->mcore_desc) {
+		dev_err(dev, "ioremap failed for mcore reg\n");
+		return -EIO;
+	}
+
+	mdev->ucore_desc = mdev->mcore_desc + QMP_UCORE_DESC_OFFSET;
+
+	mdev->mbox_client.dev = dev;
+	mdev->mbox_client.knows_txdone = false;
+	mdev->mbox_chan = mbox_request_channel(&mdev->mbox_client, 0);
+	if (IS_ERR(mdev->mbox_chan)) {
+		dev_err(dev, "mbox chan for IPC is missing\n");
+		return PTR_ERR(mdev->mbox_chan);
+	}
+
+	return 0;
+}
+
+static void tmel_qmp_remove(struct platform_device *pdev)
+{
+	struct qmp_device *mdev = platform_get_drvdata(pdev);
+
+	mbox_controller_unregister(&mdev->ctrl);
+	kfree(mdev->rx_pkt.iov_base);
+}
+
+static struct device *tmel_get_device(void)
+{
+	struct tmel *tdev = tmeldev;
+
+	if (!tdev)
+		return NULL;
+
+	return tdev->dev;
+}
+
+static int tmel_prepare_msg(struct tmel *tdev, u32 msg_uid,
+			    void *msg_buf, size_t msg_size)
+{
+	struct tmel_ipc_pkt *ipc_pkt = tdev->ipc_pkt;
+	struct ipc_header *msg_hdr = &ipc_pkt->msg_hdr;
+	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
+	struct sram_payload *sram_payload = &ipc_pkt->payload.sram_payload;
+	int ret;
+
+	memset(ipc_pkt, 0, sizeof(struct tmel_ipc_pkt));
+
+	msg_hdr->msg_type = TMEL_MSG_UID_MSG_TYPE(msg_uid);
+	msg_hdr->action_id = TMEL_MSG_UID_ACTION_ID(msg_uid);
+
+	pr_debug("uid: %d, msg_size: %zu msg_type:%d, action_id:%d\n",
+		 msg_uid, msg_size, msg_hdr->msg_type, msg_hdr->action_id);
+
+	if (sizeof(struct ipc_header) + msg_size <= MBOX_IPC_PACKET_SIZE) {
+		/* Mbox only */
+		msg_hdr->ipc_type = IPC_MBOX_ONLY;
+		msg_hdr->msg_len = msg_size;
+		memcpy((void *)mbox_payload, msg_buf, msg_size);
+	} else if (msg_size <= SRAM_IPC_MAX_BUF_SIZE) {
+		/* SRAM */
+		msg_hdr->ipc_type = IPC_MBOX_SRAM;
+		msg_hdr->msg_len = 8;
+
+		tdev->sram_dma_addr = dma_map_single(tdev->dev, msg_buf,
+						     msg_size,
+						     DMA_BIDIRECTIONAL);
+		ret = dma_mapping_error(tdev->dev, tdev->sram_dma_addr);
+		if (ret != 0) {
+			pr_err("SRAM DMA mapping error: %d\n", ret);
+			return ret;
+		}
+
+		sram_payload->payload_ptr = tdev->sram_dma_addr;
+		sram_payload->payload_len = msg_size;
+	} else {
+		pr_err("Invalid payload length: %zu\n", msg_size);
+	}
+
+	return 0;
+}
+
+static void tmel_unprepare_message(struct tmel *tdev,
+				   void *msg_buf, size_t msg_size)
+{
+	struct tmel_ipc_pkt *ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
+	struct mbox_payload *mbox_payload = &ipc_pkt->payload.mbox_payload;
+
+	if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_ONLY) {
+		memcpy(msg_buf, (void *)mbox_payload, msg_size);
+	} else if (ipc_pkt->msg_hdr.ipc_type == IPC_MBOX_SRAM) {
+		dma_unmap_single(tdev->dev, tdev->sram_dma_addr, msg_size,
+				 DMA_BIDIRECTIONAL);
+		tdev->sram_dma_addr = 0;
+	}
+}
+
+static bool tmel_rx_done(struct tmel *tdev)
+{
+	return tdev->rx_done;
+}
+
+static int tmel_process_request(u32 msg_uid, void *msg_buf,
+				size_t msg_size)
+{
+	struct tmel *tdev = tmeldev;
+	unsigned long jiffies;
+	struct tmel_ipc_pkt *resp_ipc_pkt;
+	long time_left = 0;
+	int ret = 0;
+
+	/*
+	 * Check to handle if probe is not successful or not completed yet
+	 */
+	if (!tdev) {
+		pr_err("tmel dev is NULL\n");
+		return -ENODEV;
+	}
+
+	if (!msg_buf || !msg_size) {
+		pr_err("Invalid msg_buf or msg_size\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&tdev->lock);
+	tdev->rx_done = false;
+
+	ret = tmel_prepare_msg(tdev, msg_uid, msg_buf, msg_size);
+	if (ret)
+		return ret;
+
+	tdev->pkt.iov_len = sizeof(struct tmel_ipc_pkt);
+	tdev->pkt.iov_base = (void *)tdev->ipc_pkt;
+
+	qmp_send_data(tdev->mdev, &tdev->pkt);
+	jiffies = msecs_to_jiffies(30000);
+
+	time_left = wait_event_interruptible_timeout(tdev->waitq,
+						     tmel_rx_done(tdev),
+						     jiffies);
+
+	if (!time_left) {
+		pr_err("Request timed out\n");
+		ret = -ETIMEDOUT;
+		goto err_exit;
+	}
+
+	if (tdev->pkt.iov_len != sizeof(struct tmel_ipc_pkt)) {
+		pr_err("Invalid pkt.size received size: %lu, expected: %zu\n",
+		       tdev->pkt.iov_len, sizeof(struct tmel_ipc_pkt));
+		ret = -EPROTO;
+		goto err_exit;
+	}
+
+	resp_ipc_pkt = (struct tmel_ipc_pkt *)tdev->pkt.iov_base;
+	tmel_unprepare_message(tdev, msg_buf, msg_size);
+	tdev->rx_done = false;
+	ret = resp_ipc_pkt->msg_hdr.response;
+
+err_exit:
+	mutex_unlock(&tdev->lock);
+	return ret;
+}
+
+static int tmel_secboot_sec_auth(u32 sw_id, void *metadata, size_t size)
+{
+	struct device *dev = tmel_get_device();
+	struct tmel_secboot_sec_auth *msg;
+	dma_addr_t elf_buf_phys;
+	void *elf_buf;
+	int ret;
+
+	if (!dev || !metadata)
+		return -EINVAL;
+
+	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+
+	elf_buf = dma_alloc_coherent(dev, size, &elf_buf_phys, GFP_KERNEL);
+	if (!elf_buf)
+		return -ENOMEM;
+
+	memcpy(elf_buf, metadata, size);
+
+	msg->req.sw_id = sw_id;
+	msg->req.elf_buf.buf = (u32)elf_buf_phys;
+	msg->req.elf_buf.buf_len = (u32)size;
+
+	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SEC_AUTH, msg,
+				   sizeof(struct tmel_secboot_sec_auth));
+	if (ret) {
+		pr_err("Failed to send IPC: %d\n", ret);
+	} else if (msg->resp.status || msg->resp.extended_error) {
+		pr_err("Failed with status: %d error: %d\n",
+		       msg->resp.status, msg->resp.extended_error);
+		ret = msg->resp.status;
+	}
+
+	kfree(msg);
+	dma_free_coherent(dev, size, elf_buf, elf_buf_phys);
+
+	return ret;
+}
+
+static int tmel_secboot_teardown(u32 sw_id, u32 secondary_sw_id)
+{
+	struct device *dev = tmel_get_device();
+	struct tmel_secboot_teardown msg = {0};
+	int ret;
+
+	if (!dev)
+		return -EINVAL;
+
+	msg.req.sw_id = sw_id;
+	msg.req.secondary_sw_id = secondary_sw_id;
+	msg.resp.status = TMEL_ERROR_GENERIC;
+
+	ret = tmel_process_request(TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN, &msg,
+				   sizeof(msg));
+	if (ret) {
+		pr_err("Failed to send IPC: %d\n", ret);
+	} else if (msg.resp.status) {
+		pr_err("Failed with status: %d\n", msg.resp.status);
+		ret = msg.resp.status;
+	}
+
+	return ret;
+}
+
+static int tmel_init(struct qmp_device *mdev)
+{
+	struct tmel *tdev;
+
+	tdev = devm_kzalloc(mdev->dev, sizeof(*tdev), GFP_KERNEL);
+	if (!tdev)
+		return -ENOMEM;
+
+	mutex_init(&tdev->lock);
+
+	tdev->ipc_pkt = devm_kzalloc(mdev->dev, sizeof(struct tmel_ipc_pkt),
+				     GFP_KERNEL);
+	if (!tdev->ipc_pkt)
+		return -ENOMEM;
+
+	init_waitqueue_head(&tdev->waitq);
+
+	tdev->rx_done = false;
+	tdev->dev = mdev->dev;
+
+	tmeldev = tdev;
+	tmeldev->mdev = mdev;
+
+	return 0;
+}
+
+static int tmel_qmp_send(struct mbox_chan *chan, void *data)
+{
+	struct qmp_device *mdev = chan->con_priv;
+
+	mdev->qwork.data =  data;
+
+	queue_work(system_wq, &mdev->qwork.work);
+
+	return 0;
+}
+
+static void tmel_qmp_send_work(struct work_struct *work)
+{
+	struct qmp_work *qwork = container_of(work, struct qmp_work, work);
+	struct qmp_device *mdev = tmeldev->mdev;
+	struct mbox_chan *chan = &mdev->ctrl.chans[0];
+
+	struct tmel_qmp_msg *tmsg = qwork->data;
+	struct tmel_sec_auth *smsg = tmsg->msg;
+	int ret;
+
+	switch (tmsg->msg_id) {
+	case TMEL_MSG_UID_SECBOOT_SEC_AUTH:
+		ret = tmel_secboot_sec_auth(smsg->pas_id,
+					    smsg->data,
+					    smsg->size);
+		break;
+	case TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN:
+		ret = tmel_secboot_teardown(smsg->pas_id, 0);
+		break;
+	}
+
+	mbox_chan_txdone(chan, 0);
+}
+
+/**
+ * tmel_qmp_mbox_of_xlate() - Returns a mailbox channel to be used for this mailbox
+ *		      device. Make sure the channel is not already in use.
+ * @mbox:       Mailbox device controls the requested channel.
+ * @spec:       Device tree arguments to specify which channel is requested.
+ */
+static struct mbox_chan *tmel_qmp_mbox_of_xlate(struct mbox_controller *mbox,
+						const struct of_phandle_args *spec)
+{
+	struct qmp_device *mdev = dev_get_drvdata(mbox->dev);
+	unsigned int channel = spec->args[0];
+
+	if (!mdev)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (channel >= mbox->num_chans)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&mdev->state_lock);
+	if (mdev->ch_in_use) {
+		dev_err(mdev->dev, "mbox channel already in use\n");
+		mutex_unlock(&mdev->state_lock);
+		return ERR_PTR(-EBUSY);
+	}
+	mdev->ch_in_use = true;
+	mutex_unlock(&mdev->state_lock);
+
+	return &mbox->chans[0];
+}
+
+static struct mbox_chan_ops tmel_qmp_ops = {
+	.startup = tmel_qmp_startup,
+	.shutdown = tmel_qmp_shutdown,
+	.send_data = tmel_qmp_send,
+};
+
+static int tmel_qmp_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct mbox_chan *chans;
+	struct qmp_device *mdev;
+	int ret = 0;
+
+	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+	if (!mdev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, mdev);
+
+	ret = tmel_qmp_parse_devicetree(pdev, mdev);
+	if (ret)
+		return ret;
+
+	mdev->dev = &pdev->dev;
+
+	chans = devm_kzalloc(mdev->dev,
+			     sizeof(*chans) * QMP_NUM_CHANS, GFP_KERNEL);
+	if (!chans)
+		return -ENOMEM;
+
+	INIT_WORK(&mdev->qwork.work, tmel_qmp_send_work);
+
+	mdev->ctrl.dev = &pdev->dev;
+	mdev->ctrl.ops = &tmel_qmp_ops;
+	mdev->ctrl.chans = chans;
+	chans[0].con_priv = mdev;
+	mdev->ctrl.num_chans = QMP_NUM_CHANS;
+	mdev->ctrl.txdone_irq = true;
+	mdev->ctrl.of_xlate = tmel_qmp_mbox_of_xlate;
+
+	ret = mbox_controller_register(&mdev->ctrl);
+	if (ret) {
+		dev_err(mdev->dev, "failed to register mbox controller\n");
+		return ret;
+	}
+
+	spin_lock_init(&mdev->tx_lock);
+	mutex_init(&mdev->state_lock);
+	mdev->local_state = LINK_DISCONNECTED;
+	init_completion(&mdev->link_complete);
+	init_completion(&mdev->ch_complete);
+
+	INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout);
+
+	ret = platform_get_irq(pdev, 0);
+
+	ret = devm_request_threaded_irq(mdev->dev, ret,
+					NULL, qmp_irq_handler,
+					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+					node->name, (void *)mdev);
+	if (ret < 0) {
+		dev_err(mdev->dev, "request threaded irq failed, ret %d\n",
+			ret);
+
+		tmel_qmp_remove(pdev);
+		return ret;
+	}
+
+	/* Receive any outstanding initial data */
+	tmel_init(mdev);
+	qmp_rx(mdev);
+
+	return 0;
+}
+
+static const struct of_device_id tmel_qmp_dt_match[] = {
+	{ .compatible = "qcom,ipq5424-tmel-qmp" },
+	{},
+};
+
+static struct platform_driver tmel_qmp_driver = {
+	.driver = {
+		.name = "tmel_qmp_mbox",
+		.of_match_table = tmel_qmp_dt_match,
+	},
+	.probe = tmel_qmp_probe,
+	.remove = tmel_qmp_remove,
+};
+module_platform_driver(tmel_qmp_driver);
+
+MODULE_DESCRIPTION("QCOM TMEL QMP DRIVER");
+MODULE_LICENSE("GPL");
diff --git a/include/linux/mailbox/tmelcom-qmp.h b/include/linux/mailbox/tmelcom-qmp.h
new file mode 100644
index 000000000000..9fa450eaf736
--- /dev/null
+++ b/include/linux/mailbox/tmelcom-qmp.h
@@ -0,0 +1,157 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _TMELCOM_H_
+#define _TMELCOM_H_
+
+/*----------------------------------------------------------------------------
+ * Documentation
+ * --------------------------------------------------------------------------
+ */
+
+/*
+ * TMEL Messages Unique Identifiers bit layout
+    _____________________________________
+   |	   |	    |	   |
+   | 31------16| 15-------8 | 7-------0 |
+   | Reserved  |messageType | actionID  |
+   |___________|____________|___________|
+	       \___________  ___________/
+			   \/
+		      TMEL_MSG_UID
+*/
+
+/*
+ * TMEL Messages Unique Identifiers Parameter ID bit layout
+_________________________________________________________________________________________
+|     |     |     |     |     |     |     |     |     |     |     |    |    |    |       |
+|31-30|29-28|27-26|25-24|23-22|21-20|19-18|17-16|15-14|13-12|11-10|9--8|7--6|5--4|3-----0|
+| p14 | p13 | p12 | p11 | p10 | p9  | p8  | p7  | p6  | p5  | p4  | p3 | p2 | p1 | nargs |
+|type |type |type |type |type |type |type |type |type |type |type |type|type|type|       |
+|_____|_____|_____|_____|_____|_____|_____|_____|_____|_____|_____|____|____|____|_______|
+
+*/
+
+/*
+ * Macro used to define unique TMEL Message Identifier based on
+ * message type and action identifier.
+ * Every argument is fully parenthesized so that expression arguments
+ * (e.g. "x | y") expand with the intended precedence; the unguarded
+ * form silently miscomputes the UID for such callers.
+ */
+#define TMEL_MSG_UID_CREATE(m, a)	((u32)((((m) & 0xff) << 8) | ((a) & 0xff)))
+
+/** Helper macro to extract the messageType from TMEL_MSG_UID. */
+#define TMEL_MSG_UID_MSG_TYPE(v)	(((v) & GENMASK(15, 8)) >> 8)
+
+/** Helper macro to extract the actionID from TMEL_MSG_UID. */
+#define TMEL_MSG_UID_ACTION_ID(v)	((v) & GENMASK(7, 0))
+
+/****************************************************************************
+ *
+ * All definitions of supported messageType's.
+ *
+ * 0x00 -> 0xF0 messageType used for production use cases.
+ * 0xF1 -> 0xFF messageType reserved (can be used for test purposes).
+ *
+ * <Template> : TMEL_MSG_<MSGTYPE_NAME>
+ * **************************************************************************/
+#define TMEL_MSG_SECBOOT		 0x00
+
+/****************************************************************************
+ *
+ * All definitions of action ID's per messageType.
+ *
+ * 0x00 -> 0xBF actionID used for production use cases.
+ * 0xC0 -> 0xFF messageType must be reserved for test use cases.
+ *
+ * NOTE: Test ID's shouldn't appear in this file.
+ *
+ * <Template> : TMEL_ACTION_<MSGTYPE_NAME>_<ACTIONID_NAME>
+ * **************************************************************************/
+
+/*
+ * ----------------------------------------------------------------------------
+		Action ID's for TMEL_MSG_SECBOOT
+ * ------------------------------------------------------------------------
+ */
+#define TMEL_ACTION_SECBOOT_SEC_AUTH		     0x04
+#define TMEL_ACTION_SECBOOT_SS_TEAR_DOWN	     0x0A
+
+/****************************************************************************
+ *
+ * All definitions of TMEL Message UID's (messageType | actionID).
+ *
+ * <Template> : TMEL_MSG_UID_<MSGTYPE_NAME>_<ACTIONID_NAME>
+ * *************************************************************************/
+
+/*----------------------------------------------------------------------------
+ * UID's for TMEL_MSG_SECBOOT
+ *-------------------------------------------------------------------------
+ */
+#define TMEL_MSG_UID_SECBOOT_SEC_AUTH	    TMEL_MSG_UID_CREATE(TMEL_MSG_SECBOOT,\
+					    TMEL_ACTION_SECBOOT_SEC_AUTH)
+
+#define TMEL_MSG_UID_SECBOOT_SS_TEAR_DOWN	TMEL_MSG_UID_CREATE(TMEL_MSG_SECBOOT,\
+						TMEL_ACTION_SECBOOT_SS_TEAR_DOWN)
+
+/* Message RAM geometry -- sizes presumably in bytes; confirm against HW spec. */
+#define HW_MBOX_SIZE			32	/* total per-client message RAM slot */
+#define MBOX_QMP_CTRL_DATA_SIZE		4	/* QMP control word */
+#define MBOX_RSV_SIZE			4	/* reserved area */
+#define MBOX_IPC_PACKET_SIZE		(HW_MBOX_SIZE - MBOX_QMP_CTRL_DATA_SIZE - MBOX_RSV_SIZE)
+#define MBOX_IPC_MAX_PARAMS		5
+
+#define MAX_PARAM_IN_PARAM_ID		14
+#define PARAM_CNT_FOR_PARAM_TYPE_OUTBUF	3
+#define SRAM_IPC_MAX_PARAMS		(MAX_PARAM_IN_PARAM_ID * PARAM_CNT_FOR_PARAM_TYPE_OUTBUF)
+#define SRAM_IPC_MAX_BUF_SIZE		(SRAM_IPC_MAX_PARAMS * sizeof(u32))
+
+/* TMEL status codes -- NOTE(review): presumably reported via ipc_header.response; confirm. */
+#define TMEL_ERROR_GENERIC		(0x1U)
+#define TMEL_ERROR_NOT_SUPPORTED	(0x2U)
+#define TMEL_ERROR_BAD_PARAMETER	(0x3U)
+#define TMEL_ERROR_BAD_MESSAGE		(0x4U)
+#define TMEL_ERROR_BAD_ADDRESS		(0x5U)
+#define TMEL_ERROR_TMELCOM_FAILURE	(0x6U)
+#define TMEL_ERROR_TMEL_BUSY		(0x7U)
+
+/* Transport used for a given IPC message. */
+enum ipc_type {
+	IPC_MBOX_ONLY,	/* payload carried inline in the mailbox message RAM */
+	IPC_MBOX_SRAM,	/* mailbox carries a pointer/length into SRAM -- see struct sram_payload */
+};
+
+/* Packed wire header at the start of every TMEL IPC packet. */
+struct ipc_header {
+	u8 ipc_type:1;	/* enum ipc_type */
+	u8 msg_len:7;	/* 7-bit length field: payload limited to 127 -- TODO confirm units */
+	u8 msg_type;	/* TMEL_MSG_* */
+	u8 action_id;	/* TMEL_ACTION_* within msg_type */
+	s8 response;	/* presumably a TMEL_ERROR_* status from the remote; confirm */
+} __packed;
+
+/* Inline payload carried directly in the mailbox message RAM. */
+struct mbox_payload {
+	u32 param[MBOX_IPC_MAX_PARAMS];
+};
+
+/* Out-of-line payload: location of a buffer in shared SRAM. */
+struct sram_payload {
+	u32 payload_ptr;	/* 32-bit buffer address -- phys/IOVA? TODO confirm */
+	u32 payload_len;	/* buffer length, presumably bytes */
+};
+
+union ipc_payload {
+	struct mbox_payload mbox_payload;	/* when ipc_type == IPC_MBOX_ONLY */
+	struct sram_payload sram_payload;	/* when ipc_type == IPC_MBOX_SRAM */
+} __packed;
+
+/* Complete IPC packet as laid out in message RAM: header followed by payload. */
+struct tmel_ipc_pkt {
+	struct ipc_header msg_hdr;
+	union ipc_payload payload;
+} __packed;
+
+/* Request handed to this controller through mbox_send_message(). */
+struct tmel_qmp_msg {
+	void *msg;	/* operation-specific request, e.g. struct tmel_sec_auth */
+	u32 msg_id;	/* a TMEL_MSG_UID_* value selecting the operation */
+};
+
+/* Parameters for TMEL_MSG_UID_SECBOOT_SEC_AUTH (secure image authentication). */
+struct tmel_sec_auth {
+	void *data;	/* image/metadata buffer -- ownership and lifetime not shown here */
+	u32 size;	/* presumably size of *data in bytes; confirm */
+	u32 pas_id;	/* NOTE(review): looks like a PAS (peripheral auth service) ID -- confirm */
+};
+#endif  /*_TMELCOM_H_ */