@@ -575,7 +575,9 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
 
 	do {
 		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
+		spin_lock_bh(&ctx->req_lock);
 		ret = hisi_qp_send(ctx->qp, msg);
+		spin_unlock_bh(&ctx->req_lock);
 		if (ret != -EBUSY)
 			break;
 		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -213,6 +213,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
 {
 	struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
 	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
 	struct acomp_req *a_req = req->req;
 	struct hisi_qp *qp = qp_ctx->qp;
 	struct device *dev = &qp->qm->pdev->dev;
@@ -244,7 +245,9 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
 
 	/* send command to start a task */
 	atomic64_inc(&dfx->send_cnt);
+	spin_lock_bh(&req_q->req_lock);
 	ret = hisi_qp_send(qp, &zip_sqe);
+	spin_unlock_bh(&req_q->req_lock);
 	if (unlikely(ret < 0)) {
 		atomic64_inc(&dfx->send_busy_cnt);
 		ret = -EAGAIN;
Take the request lock before the qp send operation so that concurrent
requests cannot race on shared queue resources. This change has almost
no impact on performance.

Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com>
---
v2:
 - Rebased on top of the merged spin_lock type.
---
 drivers/crypto/hisilicon/hpre/hpre_crypto.c | 2 ++
 drivers/crypto/hisilicon/zip/zip_crypto.c   | 3 +++
 2 files changed, 5 insertions(+)
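
For reference, here is a minimal sketch of what the HPRE send path looks
like with this change applied, reconstructed only from the hunk above.
The dfx lookup, the surrounding declarations, and the HPRE_TRY_TIMES
retry cap are assumptions for illustration; the patch itself only adds
the spin_lock_bh()/spin_unlock_bh() pair around hisi_qp_send():

/*
 * Sketch of the HPRE send path with this patch applied (not the exact
 * driver function). The dfx field path and HPRE_TRY_TIMES are assumed;
 * only the spin_lock_bh() pair reflects the actual change.
 */
static int hpre_send_sketch(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;	/* assumed field path */
	int ctr = 0;
	int ret;

	do {
		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
		/*
		 * Hold the request lock across hisi_qp_send() so that
		 * concurrent senders on the same qp cannot interleave
		 * their updates to the hardware send queue.
		 */
		spin_lock_bh(&ctx->req_lock);
		ret = hisi_qp_send(ctx->qp, msg);
		spin_unlock_bh(&ctx->req_lock);
		if (ret != -EBUSY)
			break;

		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
	} while (ctr++ < HPRE_TRY_TIMES);	/* assumed bound on -EBUSY retries */

	return ret;
}

The zip hunk follows the same pattern, taking req_q->req_lock around
hisi_qp_send(qp, &zip_sqe).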