@@ -52,29 +52,116 @@ static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
-static void __scm_smc_do(const struct arm_smccc_args *smc,
- struct arm_smccc_res *res, bool atomic)
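+/*
+ * Build a WAITQ_RESUME call asking firmware to continue the previously
+ * parked SMC identified by smc_call_ctx.
+ */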
+static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx)
{
- int retry_count = 0;
+ memset(resume->args, 0, sizeof(resume->args[0]) * ARRAY_SIZE(resume->args));
+
+ resume->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_RESUME));
+
+ resume->args[1] = QCOM_SCM_ARGS(1);
+
+ resume->args[2] = smc_call_ctx;
+}
+
+static void fill_get_wq_ctx_args(struct arm_smccc_args *get_wq_ctx)
+{
+ memset(get_wq_ctx->args, 0, sizeof(get_wq_ctx->args[0]) * ARRAY_SIZE(get_wq_ctx->args));
+
+ get_wq_ctx->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
+ ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
+ SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
+}
+
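+/*
+ * Query firmware for a wait-queue context that needs attention. On success
+ * the context id, its wake flags and an indication of whether further
+ * contexts are pending are returned in a1/a2/a3.
+ */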
+int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
+{
+ int ret;
+ struct arm_smccc_args get_wq_ctx = {0};
+ struct arm_smccc_res get_wq_res;
+
+ fill_get_wq_ctx_args(&get_wq_ctx);
+
+ /* Guaranteed to return only success or error, no WAITQ_* */
+ __scm_smc_do_quirk(&get_wq_ctx, &get_wq_res);
+ ret = get_wq_res.a0;
+ if (ret)
+ return ret;
+
+ *wq_ctx = get_wq_res.a1;
+ *flags = get_wq_res.a2;
+ *more_pending = get_wq_res.a3;
+
+ return 0;
+}
+
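+/*
+ * Handle the firmware wait-queue quirk around a single SMC call:
+ *
+ *   1. Issue the SMC.
+ *   2. If firmware parks the call it returns QCOM_SCM_WAITQ_SLEEP with
+ *      a1 = wq_ctx, a2 = smc_call_ctx, a3 = flags.
+ *   3. Sleep on the completion associated with wq_ctx until the wait-queue
+ *      interrupt wakes it, then re-enter firmware with
+ *      WAITQ_RESUME(smc_call_ctx) and go back to step 2.
+ *   4. Any other return value is the final result and ends the loop.
+ */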
+static int scm_smc_do_quirk(struct device *dev, struct arm_smccc_args *smc,
+ struct arm_smccc_res *res)
+{
+ struct completion *wq = NULL;
+ struct qcom_scm *qscm;
+ u32 wq_ctx, smc_call_ctx, flags;
+
+ do {
+ __scm_smc_do_quirk(smc, res);
+
+ if (res->a0 == QCOM_SCM_WAITQ_SLEEP) {
+ wq_ctx = res->a1;
+ smc_call_ctx = res->a2;
+ flags = res->a3;
+
+ if (!dev)
+ return -EPROBE_DEFER;
+
+ qscm = dev_get_drvdata(dev);
+ wq = qcom_scm_lookup_wq(qscm, wq_ctx);
+ if (IS_ERR_OR_NULL(wq)) {
+ pr_err("No waitqueue found for wq_ctx %d: %ld\n",
+ wq_ctx, PTR_ERR(wq));
+ return PTR_ERR(wq);
+ }
+
+ wait_for_completion(wq);
+ fill_wq_resume_args(smc, smc_call_ctx);
+ }
+ } while (res->a0 == QCOM_SCM_WAITQ_SLEEP);
+
+ return 0;
+}
+
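+/*
+ * Atomic callers cannot sleep, so they bypass both qcom_scm_lock and the
+ * wait-queue handling above; non-atomic callers go through
+ * scm_smc_do_quirk() and additionally retry on QCOM_SCM_V2_EBUSY with a
+ * fixed backoff.
+ */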
+static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc,
+ struct arm_smccc_res *res, bool atomic)
+{
+ int ret, retry_count = 0;
if (atomic) {
__scm_smc_do_quirk(smc, res);
- return;
+ return 0;
}
do {
mutex_lock(&qcom_scm_lock);
- __scm_smc_do_quirk(smc, res);
+ ret = scm_smc_do_quirk(dev, smc, res);
mutex_unlock(&qcom_scm_lock);
+ if (ret)
+ return ret;
+
if (res->a0 == QCOM_SCM_V2_EBUSY) {
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
break;
msleep(QCOM_SCM_EBUSY_WAIT_MS);
}
} while (res->a0 == QCOM_SCM_V2_EBUSY);
+
+ return 0;
}
@@ -83,7 +170,7 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
struct qcom_scm_res *res, bool atomic)
{
int arglen = desc->arginfo & 0xf;
- int i;
+ int i, ret;
dma_addr_t args_phys = 0;
void *args_virt = NULL;
size_t alloc_len;
@@ -135,19 +222,24 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
}
- __scm_smc_do(&smc, &smc_res, atomic);
+ /* ret is checked only after the args_virt cleanup below */
+ ret = __scm_smc_do(dev, &smc, &smc_res, atomic);
if (args_virt) {
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
kfree(args_virt);
}
+ if (ret)
+ return ret;
+
if (res) {
res->result[0] = smc_res.a1;
res->result[1] = smc_res.a2;
res->result[2] = smc_res.a3;
}
return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
}
@@ -3,7 +3,9 @@
* Copyright (C) 2015 Linaro Ltd.
*/
#include <linux/platform_device.h>
+#include <linux/idr.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
@@ -13,9 +15,12 @@
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
#include <linux/arm-smccc.h>
#include "qcom_scm.h"
@@ -27,6 +32,13 @@ module_param(download_mode, bool, 0);
#define SCM_HAS_IFACE_CLK BIT(1)
#define SCM_HAS_BUS_CLK BIT(2)
+struct qcom_scm_waitq {
+ struct idr idr;
+ /* control access to IDR */
+ spinlock_t idr_lock;
+ struct work_struct scm_irq_work;
+};
+
struct qcom_scm {
struct device *dev;
struct clk *core_clk;
@@ -34,6 +46,7 @@ struct qcom_scm {
struct clk *bus_clk;
struct icc_path *path;
struct reset_controller_dev reset;
+ struct qcom_scm_waitq waitq;
/* control access to the interconnect path */
struct mutex scm_bw_lock;
@@ -63,6 +76,9 @@ static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
BIT(2), BIT(1), BIT(4), BIT(6)
};
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE BIT(0)
+#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL BIT(1)
+
static const char * const qcom_scm_convention_names[] = {
[SMC_CONVENTION_UNKNOWN] = "unknown",
[SMC_CONVENTION_ARM_32] = "smc arm 32",
@@ -1325,11 +1341,92 @@ bool qcom_scm_is_available(void)
}
EXPORT_SYMBOL(qcom_scm_is_available);
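+
+/*
+ * Find the completion keyed by the firmware-provided wq_ctx, creating and
+ * registering one in the IDR on first use. Called both from the sleeping
+ * SMC path and from the wait-queue interrupt worker; the allocation uses
+ * GFP_ATOMIC because it happens under waitq.idr_lock with interrupts off.
+ */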
+struct completion *qcom_scm_lookup_wq(struct qcom_scm *scm, u32 wq_ctx)
+{
+ struct completion *wq = NULL;
+ u32 wq_ctx_idr = wq_ctx;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&scm->waitq.idr_lock, flags);
+ wq = idr_find(&scm->waitq.idr, wq_ctx);
+ if (wq)
+ goto out;
+
+ wq = devm_kzalloc(scm->dev, sizeof(*wq), GFP_ATOMIC);
+ if (!wq) {
+ wq = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ init_completion(wq);
+
+ err = idr_alloc_u32(&scm->waitq.idr, wq, &wq_ctx_idr,
+ U32_MAX, GFP_ATOMIC);
+ if (err < 0) {
+ devm_kfree(scm->dev, wq);
+ wq = ERR_PTR(err);
+ }
+
+out:
+ spin_unlock_irqrestore(&scm->waitq.idr_lock, flags);
+ return wq;
+}
+
+void scm_waitq_flag_handler(struct completion *wq, u32 flags)
+{
+ switch (flags) {
+ case QCOM_SMC_WAITQ_FLAG_WAKE_ONE:
+ complete(wq);
+ break;
+ case QCOM_SMC_WAITQ_FLAG_WAKE_ALL:
+ complete_all(wq);
+ break;
+ default:
+ pr_err("invalid flags: %u\n", flags);
+ }
+}
+
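+/*
+ * Drain pending wait-queue contexts: ask firmware which context to wake via
+ * GET_WQ_CTX, wake one or all waiters on the matching completion according
+ * to the returned flags, and repeat while firmware reports more contexts
+ * pending.
+ */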
+static void scm_irq_work(struct work_struct *work)
+{
+ int ret;
+ u32 wq_ctx, flags, more_pending = 0;
+ struct completion *wq_to_wake;
+ struct qcom_scm_waitq *w = container_of(work, struct qcom_scm_waitq, scm_irq_work);
+ struct qcom_scm *scm = container_of(w, struct qcom_scm, waitq);
+
+ do {
+ ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
+ if (ret) {
+ pr_err("GET_WQ_CTX SMC call failed: %d\n", ret);
+ return;
+ }
+
+ wq_to_wake = qcom_scm_lookup_wq(scm, wq_ctx);
+ if (IS_ERR_OR_NULL(wq_to_wake)) {
+ pr_err("No waitqueue found for wq_ctx %d: %ld\n",
+ wq_ctx, PTR_ERR(wq_to_wake));
+ return;
+ }
+
+ scm_waitq_flag_handler(wq_to_wake, flags);
+ } while (more_pending);
+}
+
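+/*
+ * Wait-queue interrupt: raised by firmware when a parked SMC call can be
+ * resumed. The GET_WQ_CTX query and completion wake-up are deferred to
+ * process context in scm_irq_work().
+ */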
+static irqreturn_t qcom_scm_irq_handler(int irq, void *p)
+{
+ struct qcom_scm *scm = p;
+
+ schedule_work(&scm->waitq.scm_irq_work);
+
+ return IRQ_HANDLED;
+}
+
static int qcom_scm_probe(struct platform_device *pdev)
{
struct qcom_scm *scm;
unsigned long clks;
- int ret;
+ int irq, ret;
scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
if (!scm)
@@ -1399,9 +1496,29 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret)
return ret;
+ platform_set_drvdata(pdev, scm);
+
__scm = scm;
__scm->dev = &pdev->dev;
+ spin_lock_init(&__scm->waitq.idr_lock);
+ idr_init(&__scm->waitq.idr);
+ INIT_WORK(&__scm->waitq.scm_irq_work, scm_irq_work);
+
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0) {
+ if (irq != -ENXIO)
+ return irq;
+ } else {
+ ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
+ IRQF_ONESHOT, "qcom-scm", __scm);
+ if (ret < 0) {
+ pr_err("Failed to request qcom-scm irq: %d\n", ret);
+ idr_destroy(&__scm->waitq.idr);
+ return ret;
+ }
+ }
+
__get_convention();
/*
@@ -1417,6 +1534,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
static void qcom_scm_shutdown(struct platform_device *pdev)
{
+ idr_destroy(&__scm->waitq.idr);
/* Clean shutdown, disable download mode to allow normal restart */
if (download_mode)
qcom_scm_set_download_mode(false);
@@ -60,6 +60,11 @@ struct qcom_scm_res {
u64 result[MAX_QCOM_SCM_RETS];
};
+struct qcom_scm;
+extern struct completion *qcom_scm_lookup_wq(struct qcom_scm *scm, u32 wq_ctx);
+extern void scm_waitq_flag_handler(struct completion *wq, u32 flags);
+extern int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending);
+
#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
enum qcom_scm_convention qcom_convention,
@@ -129,6 +134,10 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
+#define QCOM_SCM_SVC_WAITQ 0x24
+#define QCOM_SCM_WAITQ_RESUME 0x02
+#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
@@ -137,6 +146,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_EINVAL_ARG -2
#define QCOM_SCM_ERROR -1
#define QCOM_SCM_INTERRUPTED 1
+#define QCOM_SCM_WAITQ_SLEEP 2
static inline int qcom_scm_remap_error(int err)
{