@@ -14,10 +14,35 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/reset.h>
+/*
+ * The RK3399 has two crypto IPs, crypto0 and crypto1.
+ * crypto0 is the main instance: it registers the crypto algorithms and debugfs.
+ * crypto1 waits until crypto0 has finished initializing (i.e. until the main
+ * variable below is set) and then registers itself as a sub-device.
+ */
+static struct rk_crypto_info *main;
+
+static const struct rk_variant rk3288_variant = {
+ .main = true,
+};
+
+static const struct rk_variant rk3328_variant = {
+ .main = true,
+};
+
+static const struct rk_variant rk3399_variant0 = {
+ .main = true,
+};
+
+static const struct rk_variant rk3399_variant1 = {
+ .sub = true,
+};
+
static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
int err;
@@ -113,8 +138,14 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
{
+ struct rk_crypto_info *rk = seq->private;
unsigned int i;
+ if (rk->sub) {
+ seq_printf(seq, "Main device requests: %lu\n", rk->nreq);
+ seq_printf(seq, "Sub-device requests: %lu\n", rk->sub->nreq);
+ }
+
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
if (!rk_cipher_algs[i]->dev)
continue;
@@ -150,6 +181,11 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
unsigned int i, k;
int err = 0;
+ if (!crypto_info->variant->main) {
+ dev_info(crypto_info->dev, "We are not main, do not register algos\n");
+ return 0;
+ }
+
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
rk_cipher_algs[i]->dev = crypto_info;
switch (rk_cipher_algs[i]->type) {
@@ -183,10 +219,15 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
return err;
}
-static void rk_crypto_unregister(void)
+static void rk_crypto_unregister(struct rk_crypto_info *crypto_info)
{
unsigned int i;
+ if (!crypto_info->variant->main) {
+ dev_info(crypto_info->dev, "We are not main, do not unregister algos\n");
+ return;
+ }
+
for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
@@ -196,8 +237,18 @@ static void rk_crypto_unregister(void)
}
static const struct of_device_id crypto_of_id_table[] = {
- { .compatible = "rockchip,rk3288-crypto" },
- { .compatible = "rockchip,rk3328-crypto" },
+ { .compatible = "rockchip,rk3288-crypto",
+ .data = &rk3288_variant,
+ },
+ { .compatible = "rockchip,rk3328-crypto",
+ .data = &rk3328_variant,
+ },
+ { .compatible = "rockchip,rk3399-crypto0",
+ .data = &rk3399_variant0,
+ },
+ { .compatible = "rockchip,rk3399-crypto1",
+ .data = &rk3399_variant1,
+ },
{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);
@@ -215,7 +266,18 @@ static int rk_crypto_probe(struct platform_device *pdev)
goto err_crypto;
}
- crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
+ crypto_info->variant = of_device_get_match_data(&pdev->dev);
+ if (!crypto_info->variant) {
+ dev_err(&pdev->dev, "Missing variant\n");
+ return -EINVAL;
+ }
+
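+ /* A sub instance cannot finish probing until the main instance has probed */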
+ if (crypto_info->variant->sub && !main) {
+ dev_info(&pdev->dev, "Main is not here yet\n");
+ return -EPROBE_DEFER;
+ }
+
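+ /* Take all resets listed in the device-tree node, not just "crypto-rst" */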
+ crypto_info->rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(crypto_info->rst)) {
err = PTR_ERR(crypto_info->rst);
goto err_crypto;
@@ -268,15 +330,24 @@ static int rk_crypto_probe(struct platform_device *pdev)
}
#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
- /* Ignore error of debugfs */
- crypto_info->dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
- crypto_info->dbgfs_stats = debugfs_create_file("stats", 0444,
- crypto_info->dbgfs_dir,
- crypto_info,
- &rk_crypto_debugfs_fops);
+ if (crypto_info->variant->main) {
+ /* Ignore error of debugfs */
+ crypto_info->dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
+ crypto_info->dbgfs_stats = debugfs_create_file("stats", 0444,
+ crypto_info->dbgfs_dir,
+ crypto_info,
+ &rk_crypto_debugfs_fops);
+ }
#endif
- dev_info(dev, "Crypto Accelerator successfully registered\n");
+ if (crypto_info->variant->main) {
+ dev_info(dev, "Main crypto accelerator successfully registered\n");
+ main = crypto_info;
+ } else {
+ dev_info(dev, "Sub crypto accelerator successfully registered\n");
+ main->sub = crypto_info;
+ }
+
return 0;
err_register_alg:
@@ -295,7 +366,7 @@ static int rk_crypto_remove(struct platform_device *pdev)
#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
debugfs_remove_recursive(crypto_tmp->dbgfs_dir);
#endif
- rk_crypto_unregister();
+ rk_crypto_unregister(crypto_tmp);
rk_crypto_pm_exit(crypto_tmp);
crypto_engine_exit(crypto_tmp->engine);
return 0;
@@ -188,6 +188,11 @@
#define CRYPTO_WRITE(dev, offset, val) \
writel_relaxed((val), ((dev)->reg + (offset)))
+struct rk_variant {
+ bool main;
+ bool sub;
+};
+
struct rk_crypto_info {
struct device *dev;
struct clk_bulk_data *clks;
@@ -195,7 +200,10 @@ struct rk_crypto_info {
struct reset_control *rst;
void __iomem *reg;
int irq;
-
+ const struct rk_variant *variant;
+ struct rk_crypto_info *sub; /* set on the main instance, points to the sub one */
+ atomic_t flow; /* round-robin counter used to dispatch requests */
+ unsigned long nreq; /* number of requests handled, reported via debugfs */
struct crypto_engine *engine;
struct completion complete;
int status;
@@ -78,12 +78,10 @@ static int zero_message_process(struct ahash_request *req)
return 0;
}
-static void rk_ahash_reg_init(struct ahash_request *req)
+static void rk_ahash_reg_init(struct ahash_request *req,
+ struct rk_crypto_info *dev)
{
struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
- struct rk_crypto_info *dev = tctx->main;
int reg_status;
reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
@@ -203,6 +201,7 @@ static int rk_ahash_digest(struct ahash_request *req)
struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
struct rk_crypto_info *dev = tctx->main;
+ struct crypto_engine *engine;
if (rk_ahash_need_fallback(req))
return rk_ahash_digest_fb(req);
@@ -210,9 +209,13 @@ static int rk_ahash_digest(struct ahash_request *req)
if (!req->nbytes)
return zero_message_process(req);
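+ /* On dual-instance SoCs, alternate requests between the two instances */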
+ if (dev->sub && atomic_inc_return(&dev->flow) % 2)
+ dev = dev->sub;
+
rctx->dev = dev;
+ engine = dev->engine;
- return crypto_transfer_hash_request_to_engine(dev->engine, req);
+ return crypto_transfer_hash_request_to_engine(engine, req);
}
static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
@@ -270,6 +273,7 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
rctx->mode = 0;
algt->stat_req++;
+ rkc->nreq++;
switch (crypto_ahash_digestsize(tfm)) {
case SHA1_DIGEST_SIZE:
@@ -286,7 +290,7 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
goto theend;
}
- rk_ahash_reg_init(areq);
+ rk_ahash_reg_init(areq, rkc);
while (sg) {
reinit_completion(&rkc->complete);
@@ -360,6 +364,10 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm)
err = pm_runtime_resume_and_get(tctx->main->dev);
if (err < 0)
goto error_pm;
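+ /* Requests may be dispatched to the sub instance, so resume it as well */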
+ if (tctx->main->sub)
+ err = pm_runtime_resume_and_get(tctx->main->sub->dev);
+ if (err < 0)
+ goto error_pm;
return 0;
error_pm:
@@ -374,6 +382,8 @@ static void rk_cra_hash_exit(struct crypto_tfm *tfm)
crypto_free_ahash(tctx->fallback_tfm);
pm_runtime_put_autosuspend(tctx->main->dev);
+ if (tctx->main->sub)
+ pm_runtime_put_autosuspend(tctx->main->sub->dev);
}
struct rk_crypto_tmp rk_ahash_sha1 = {
@@ -98,11 +98,15 @@ static int rk_cipher_handle_req(struct skcipher_request *req)
struct rk_cipher_ctx *tctx = crypto_skcipher_ctx(tfm);
struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
struct rk_crypto_info *rkc = tctx->main;
- struct crypto_engine *engine = rkc->engine;
+ struct crypto_engine *engine;
if (rk_cipher_need_fallback(req))
return rk_cipher_fallback(req);
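+ /* Spread the load: odd requests go to the sub instance when it exists */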
+ if (rkc->sub && atomic_inc_return(&rkc->flow) % 2)
+ rkc = rkc->sub;
+
+ engine = rkc->engine;
rctx->dev = rkc;
return crypto_transfer_skcipher_request_to_engine(engine, req);
@@ -319,6 +323,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
struct rk_crypto_info *rkc = rctx->dev;
algt->stat_req++;
+ rkc->nreq++;
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
@@ -456,6 +461,10 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
err = pm_runtime_resume_and_get(ctx->main->dev);
if (err < 0)
goto error_pm;
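+ /* The sub instance may also handle requests, so keep it resumed too */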
+ if (ctx->main->sub)
+ err = pm_runtime_resume_and_get(ctx->main->sub->dev);
+ if (err < 0)
+ goto error_pm;
return 0;
error_pm:
@@ -470,6 +479,8 @@ static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
memzero_explicit(ctx->key, ctx->keylen);
crypto_free_skcipher(ctx->fallback_tfm);
pm_runtime_put_autosuspend(ctx->main->dev);
+ if (ctx->main->sub)
+ pm_runtime_put_autosuspend(ctx->main->sub->dev);
}
struct rk_crypto_tmp rk_ecb_aes_alg = {