@@ -49,7 +49,6 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
return -EINVAL;
}
- spin_lock_bh(&ss->slock);
for (i = 0; i < op->keylen; i += 4)
writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
@@ -110,12 +110,11 @@ release_ss:
sg_miter_stop(&mi);
sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
- spin_unlock_bh(&ss->slock);
return err;
}
/* Generic function that support SG with size not multiple of 4 */
-static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
+int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
@@ -174,7 +174,6 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
if (no_chunk == 1)
return sun4i_ss_opti_poll(areq);
- spin_lock_bh(&ss->slock);
for (i = 0; i < op->keylen; i += 4)
writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
@@ -295,7 +295,6 @@ release_ss:
sg_miter_stop(&mi);
sg_miter_stop(&mo);
writel(0, ss->base + SS_CTL);
- spin_unlock_bh(&ss->slock);
return err;
}
@@ -309,6 +309,6 @@ int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
op->keymode;
+ return sun4i_ss_enqueue(&areq->base);
- return sun4i_ss_cipher_poll(areq);
}
@@ -320,6 +321,6 @@ int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
op->keymode;
+ return sun4i_ss_enqueue(&areq->base);
- return sun4i_ss_cipher_poll(areq);
}
@@ -22,10 +22,79 @@
#include <linux/scatterlist.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/kthread.h>
#include "sun4i-ss.h"
-static struct sun4i_ss_alg_template driver_algs[] = {
+/*
+ * sun4i_ss_enqueue() - queue a cipher request for the SS worker thread.
+ * @areq: the async request embedded in an ablkcipher_request
+ *
+ * Returns -EINPROGRESS when the request was queued, -EBUSY when it went
+ * to the backlog (MAY_BACKLOG set), or a negative error code otherwise.
+ */
+int sun4i_ss_enqueue(struct crypto_async_request *areq)
+{
+	struct ablkcipher_request *abreq = ablkcipher_request_cast(areq);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(abreq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	int ret;
+
+	spin_lock_bh(&op->ss->slock);
+	ret = crypto_enqueue_request(&op->ss->queue, areq);
+	spin_unlock_bh(&op->ss->slock);
+
+	/*
+	 * Also wake the worker when the request was backlogged (-EBUSY):
+	 * draining the main queue is what moves backlogged requests forward.
+	 */
+	if (ret == -EINPROGRESS || ret == -EBUSY)
+		wake_up_process(op->ss->thread);
+
+	return ret;
+}
+
+/*
+ * sun4i_ss_thread() - kthread draining the SS request queue.
+ * @data: pointer to the sun4i_ss_ctx of the device
+ *
+ * Requests are dequeued one at a time and run synchronously on the
+ * hardware via sun4i_ss_cipher_poll(); completion callbacks are invoked
+ * with softirqs disabled, as the crypto API expects.
+ */
+static int sun4i_ss_thread(void *data)
+{
+	struct sun4i_ss_ctx *ss = data;
+	struct crypto_async_request *backlog;
+	struct crypto_async_request *arq;
+	struct ablkcipher_request *areq;
+	int ret;
+
+	do {
+		/* Set the state before checking the queue: avoids a lost wakeup
+		 * if sun4i_ss_enqueue() wakes us between dequeue and schedule().
+		 */
+		__set_current_state(TASK_INTERRUPTIBLE);
+
+		spin_lock_bh(&ss->slock);
+		backlog = crypto_get_backlog(&ss->queue);
+		arq = crypto_dequeue_request(&ss->queue);
+		spin_unlock_bh(&ss->slock);
+
+		if (backlog)
+			backlog->complete(backlog, -EINPROGRESS);
+
+		if (!arq) {
+			schedule();
+			continue;
+		}
+
+		/* We have work: leave the sleep-pending state before doing it. */
+		__set_current_state(TASK_RUNNING);
+
+		switch (crypto_tfm_alg_type(arq->tfm)) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			areq = ablkcipher_request_cast(arq);
+			ret = sun4i_ss_cipher_poll(areq);
+			/* we are in a thread and complete must be called with softirq off */
+			local_bh_disable();
+			arq->complete(arq, ret);
+			local_bh_enable();
+			break;
+		default:
+			dev_err(ss->dev, "ERROR: invalid request\n");
+			local_bh_disable();
+			arq->complete(arq, -EINVAL);
+			local_bh_enable();
+			break;
+		}
+	} while (!kthread_should_stop());
+	return 0;
+}
+
+static struct sun4i_ss_alg_template driver_algs[] = {/*
{ .type = CRYPTO_ALG_TYPE_AHASH,
.alg.hash = {
.init = sun4i_hash_init,
@@ -77,14 +146,14 @@ static struct sun4i_ss_alg_template driver_algs[] = {
}
}
}
-},
+},*/
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-sun4i-ss",
.cra_priority = 300,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
@@ -99,7 +168,7 @@ static struct sun4i_ss_alg_template driver_algs[] = {
.decrypt = sun4i_ss_cbc_aes_decrypt,
}
}
-},
+},/*
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
.alg.crypto = {
.cra_name = "ecb(aes)",
@@ -208,7 +277,7 @@ static struct sun4i_ss_alg_template driver_algs[] = {
.decrypt = sun4i_ss_ecb_des3_decrypt,
}
}
-},
+},*/
};
static int sun4i_ss_probe(struct platform_device *pdev)
@@ -313,8 +382,16 @@ static int sun4i_ss_probe(struct platform_device *pdev)
ss->dev = &pdev->dev;
+ crypto_init_queue(&ss->queue, 50);
+
spin_lock_init(&ss->slock);
+ ss->thread = kthread_run(sun4i_ss_thread, ss, "sun4i_sskd");
+ if (IS_ERR(ss->thread)) {
+ err = PTR_ERR(ss->thread);
+ goto error_thread;
+ }
+
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
driver_algs[i].ss = ss;
switch (driver_algs[i].type) {
@@ -347,6 +424,10 @@ error_alg:
break;
}
}
+error_thread:
+	/* ss->thread holds an ERR_PTR when kthread_run() itself failed */
+	if (!IS_ERR_OR_NULL(ss->thread))
+		kthread_stop(ss->thread);
error_clk:
clk_disable_unprepare(ss->ssclk);
error_ssclk:
@@ -359,6 +438,8 @@ static int sun4i_ss_remove(struct platform_device *pdev)
int i;
struct sun4i_ss_ctx *ss = platform_get_drvdata(pdev);
+ kthread_stop(ss->thread);
+
sun4i_ss_hwrng_remove(&ss->hwrng);
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
@@ -132,6 +132,8 @@ struct sun4i_ss_ctx {
struct device *dev;
struct resource *res;
spinlock_t slock; /* control the use of the device */
+ struct crypto_queue queue;
+ struct task_struct *thread;
struct hwrng hwrng;
u32 seed[SS_SEED_LEN / 4];
};
@@ -165,6 +167,9 @@ struct sun4i_req_ctx {
struct sun4i_ss_ctx *ss;
};
+int sun4i_ss_enqueue(struct crypto_async_request *areq);
+int sun4i_ss_cipher_poll(struct ablkcipher_request *areq);
+
int sun4i_hash_crainit(struct crypto_tfm *tfm);
int sun4i_hash_init(struct ahash_request *areq);
int sun4i_hash_update(struct ahash_request *areq);