@@ -172,6 +172,8 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
+ pr_info("rxe_alloc_pd: called\n");
+
return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}
@@ -179,6 +181,8 @@ static void rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct rxe_pd *pd = to_rpd(ibpd);
+ pr_info("rxe_dealloc_pd: called\n");
+
rxe_drop_ref(pd);
}
@@ -410,6 +414,8 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
struct rxe_qp *qp;
struct rxe_create_qp_resp __user *uresp = NULL;
+ pr_info("rxe_create_qp: called\n");
+
if (udata) {
if (udata->outlen < sizeof(*uresp))
return ERR_PTR(-EINVAL);
@@ -457,6 +463,8 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct rxe_dev *rxe = to_rdev(ibqp->device);
struct rxe_qp *qp = to_rqp(ibqp);
+ pr_info("rxe_modify_qp: called\n");
+
err = rxe_qp_chk_attr(rxe, qp, attr, mask);
if (err)
goto err1;
@@ -476,6 +484,8 @@ static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
{
struct rxe_qp *qp = to_rqp(ibqp);
+ pr_info("rxe_query_qp: called\n");
+
rxe_qp_to_init(qp, init);
rxe_qp_to_attr(qp, attr, mask);
@@ -486,6 +496,8 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
struct rxe_qp *qp = to_rqp(ibqp);
+ pr_info("rxe_destroy_qp: called\n");
+
rxe_qp_destroy(qp);
rxe_drop_index(qp);
rxe_drop_ref(qp);
@@ -782,6 +794,8 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct rxe_cq *cq = to_rcq(ibcq);
struct rxe_create_cq_resp __user *uresp = NULL;
+ pr_info("rxe_create_cq: called\n");
+
if (udata) {
if (udata->outlen < sizeof(*uresp))
return -EINVAL;
@@ -807,6 +821,8 @@ static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct rxe_cq *cq = to_rcq(ibcq);
+ pr_info("rxe_destroy_cq: called\n");
+
rxe_cq_disable(cq);
rxe_drop_ref(cq);
@@ -846,6 +862,8 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
struct rxe_cqe *cqe;
unsigned long flags;
+ pr_info("rxe_poll_cq: called\n");
+
spin_lock_irqsave(&cq->cq_lock, flags);
for (i = 0; i < num_entries; i++) {
cqe = queue_head(cq->queue);
@@ -916,6 +934,8 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_reg_mr_resp __user *uresp = NULL;
+ pr_info("rxe_reg_user_mr: called\n");
+
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
@@ -939,9 +959,6 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
if (err)
goto err3;
- pr_info("rxe_reg_user_mr: index = 0x%08x, rkey = 0x%08x\n",
- mr->pelem.index, mr->ibmr.rkey);
-
if (uresp) {
if (copy_to_user(&uresp->index, &mr->pelem.index,
sizeof(uresp->index))) {
@@ -964,6 +981,8 @@ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct rxe_mr *mr = to_rmr(ibmr);
+ pr_info("rxe_dereg_user_mr: called\n");
+
mr->state = RXE_MEM_STATE_ZOMBIE;
rxe_drop_ref(mr->pd);
rxe_drop_index(mr);
Added some debug prints to help out. They will go away later.

Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
 drivers/infiniband/sw/rxe/rxe_verbs.c | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)
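
An aside for reference, not part of this patch: since these one-shot trace
prints are meant to be removed again, one alternative sketch is to use
pr_debug() so each call site can be switched on and off through dynamic
debug instead of being dropped by a follow-up patch. The pr_fmt prefix
below is an assumption for illustration only; the function body is copied
from the first hunk above.

/* Hypothetical variant of the rxe_alloc_pd hunk, illustration only. */
#define pr_fmt(fmt) "rxe: " fmt		/* assumed prefix, not from this patch */

#include <linux/printk.h>

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	/* compiled out, or silent until enabled via dynamic debug */
	pr_debug("%s: called\n", __func__);

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

With CONFIG_DYNAMIC_DEBUG enabled, such a print can then be turned on at
runtime with:

	echo 'func rxe_alloc_pd +p' > /sys/kernel/debug/dynamic_debug/control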