diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -275,7 +275,8 @@ void notify_eq(struct erdma_eq *eq);
void *get_next_valid_eqe(struct erdma_eq *eq);
int erdma_aeq_init(struct erdma_dev *dev);
-void erdma_aeq_destroy(struct erdma_dev *dev);
+int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth);
+void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq);
void erdma_aeq_event_handler(struct erdma_dev *dev);
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -158,20 +158,13 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
{
struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_eq *eq = &cmdq->eq;
+ int ret;
- eq->depth = cmdq->max_outstandings;
- eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
- &eq->qbuf_dma_addr, GFP_KERNEL);
- if (!eq->qbuf)
- return -ENOMEM;
-
- spin_lock_init(&eq->lock);
- atomic64_set(&eq->event_num, 0);
+ ret = erdma_eq_common_init(dev, eq, cmdq->max_outstandings);
+ if (ret)
+ return ret;
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
- eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
- if (!eq->dbrec)
- goto err_out;
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
upper_32_bits(eq->qbuf_dma_addr));
@@ -181,12 +174,6 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
return 0;
-
-err_out:
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
- eq->qbuf_dma_addr);
-
- return -ENOMEM;
}
int erdma_cmdq_init(struct erdma_dev *dev)
@@ -247,10 +234,7 @@ void erdma_cmdq_destroy(struct erdma_dev *dev)
clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
- dma_free_coherent(&dev->pdev->dev, cmdq->eq.depth << EQE_SHIFT,
- cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
-
- dma_pool_free(dev->db_pool, cmdq->eq.dbrec, cmdq->eq.dbrec_dma);
+ erdma_eq_destroy(dev, &cmdq->eq);
dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -80,50 +80,62 @@ void erdma_aeq_event_handler(struct erdma_dev *dev)
notify_eq(&dev->aeq);
}
-int erdma_aeq_init(struct erdma_dev *dev)
+int erdma_eq_common_init(struct erdma_dev *dev, struct erdma_eq *eq, u32 depth)
{
- struct erdma_eq *eq = &dev->aeq;
+ u32 buf_size = depth << EQE_SHIFT;
- eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
-
- eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
- &eq->qbuf_dma_addr, GFP_KERNEL);
+ eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, buf_size,
+ &eq->qbuf_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
if (!eq->qbuf)
return -ENOMEM;
+ eq->dbrec = dma_pool_alloc(dev->db_pool, GFP_KERNEL | __GFP_ZERO,
+ &eq->dbrec_dma);
+ if (!eq->dbrec)
+ goto err_free_qbuf;
+
spin_lock_init(&eq->lock);
atomic64_set(&eq->event_num, 0);
atomic64_set(&eq->notify_num, 0);
-
- eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
- eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
- if (!eq->dbrec)
- goto err_out;
-
- erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
- upper_32_bits(eq->qbuf_dma_addr));
- erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
- lower_32_bits(eq->qbuf_dma_addr));
- erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
- erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
+ eq->ci = 0;
+ eq->depth = depth;
return 0;
-err_out:
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
+err_free_qbuf:
+ dma_free_coherent(&dev->pdev->dev, buf_size, eq->qbuf,
eq->qbuf_dma_addr);
return -ENOMEM;
}
-void erdma_aeq_destroy(struct erdma_dev *dev)
+void erdma_eq_destroy(struct erdma_dev *dev, struct erdma_eq *eq)
{
- struct erdma_eq *eq = &dev->aeq;
-
+ dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
eq->qbuf_dma_addr);
+}
- dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
+int erdma_aeq_init(struct erdma_dev *dev)
+{
+ struct erdma_eq *eq = &dev->aeq;
+ int ret;
+
+ ret = erdma_eq_common_init(dev, &dev->aeq, ERDMA_DEFAULT_EQ_DEPTH);
+ if (ret)
+ return ret;
+
+ eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
+
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
+ upper_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
+ lower_32_bits(eq->qbuf_dma_addr));
+ erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
+ erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
+
+ return 0;
}
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
@@ -234,32 +246,21 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
int ret;
- eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
- eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
- &eq->qbuf_dma_addr, GFP_KERNEL);
- if (!eq->qbuf)
- return -ENOMEM;
-
- spin_lock_init(&eq->lock);
- atomic64_set(&eq->event_num, 0);
- atomic64_set(&eq->notify_num, 0);
+ ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
+ if (ret)
+ return ret;
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
(ceqn + 1) * ERDMA_DB_SIZE;
-
- eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
- if (!eq->dbrec) {
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
- eq->qbuf, eq->qbuf_dma_addr);
- return -ENOMEM;
- }
-
- eq->ci = 0;
dev->ceqs[ceqn].dev = dev;
+ dev->ceqs[ceqn].ready = true;
/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
ret = create_eq_cmd(dev, ceqn + 1, eq);
- dev->ceqs[ceqn].ready = ret ? false : true;
+ if (ret) {
+ erdma_eq_destroy(dev, eq);
+ dev->ceqs[ceqn].ready = false;
+ }
return ret;
}
@@ -283,9 +284,7 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
if (err)
return;
- dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
- eq->qbuf_dma_addr);
- dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
+ erdma_eq_destroy(dev, eq);
}
int erdma_ceqs_init(struct erdma_dev *dev)
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -359,7 +359,7 @@ static int erdma_probe_dev(struct pci_dev *pdev)
erdma_cmdq_destroy(dev);
err_uninit_aeq:
- erdma_aeq_destroy(dev);
+ erdma_eq_destroy(dev, &dev->aeq);
err_uninit_comm_irq:
erdma_comm_irq_uninit(dev);
@@ -392,7 +392,7 @@ static void erdma_remove_dev(struct pci_dev *pdev)
erdma_ceqs_uninit(dev);
erdma_hw_reset(dev, false);
erdma_cmdq_destroy(dev);
- erdma_aeq_destroy(dev);
+ erdma_eq_destroy(dev, &dev->aeq);
erdma_comm_irq_uninit(dev);
pci_free_irq_vectors(dev->pdev);
erdma_device_uninit(dev);
Extract the common parts of EQ initialization and destruction into two new
helpers, erdma_eq_common_init() and erdma_eq_destroy(), and use them for the
CMDQ-EQ, AEQ and CEQs to make the code cleaner.

Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
---
 drivers/infiniband/hw/erdma/erdma.h      |  3 +-
 drivers/infiniband/hw/erdma/erdma_cmdq.c | 26 ++-----
 drivers/infiniband/hw/erdma/erdma_eq.c   | 91 ++++++++++++------------
 drivers/infiniband/hw/erdma/erdma_main.c |  4 +-
 4 files changed, 54 insertions(+), 70 deletions(-)
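
(Not part of the patch, just a reviewer note: a minimal sketch of how the two
new helpers are intended to pair up for a queue, based on the hunks above.
The example_eq_setup()/example_eq_teardown() names and the choice of the AEQ
doorbell register here are illustrative only.)

static int example_eq_setup(struct erdma_dev *dev, struct erdma_eq *eq)
{
	int ret;

	/* Allocates and zeroes qbuf and the doorbell record, and resets
	 * ci, depth, the event/notify counters and the lock.
	 */
	ret = erdma_eq_common_init(dev, eq, ERDMA_DEFAULT_EQ_DEPTH);
	if (ret)
		return ret;

	/* The caller still owns the queue-specific pieces: the doorbell
	 * address and the hardware registration (see erdma_aeq_init() and
	 * erdma_ceq_init_one()).
	 */
	eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;

	return 0;
}

static void example_eq_teardown(struct erdma_dev *dev, struct erdma_eq *eq)
{
	/* Frees the doorbell record and qbuf in one place. */
	erdma_eq_destroy(dev, eq);
}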