diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -355,10 +355,9 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
int ret;

- ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+ ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
payload_size(pkt), to_mem_obj, NULL);
if (ret)
@@ -374,12 +373,11 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
int ret;
u64 atomic_orig = atmack_orig(pkt);

- ret = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE,
+ ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
sizeof(u64), to_mem_obj, NULL);
if (ret)
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -106,20 +106,20 @@ enum copy_direction {
from_mem_obj,
};

-int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_dma(struct rxe_pd *pd,
int access, struct rxe_mem *mem);

-int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
u64 length, u64 iova, int access, struct ib_udata *udata,
struct rxe_mem *mr);

-int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_fast(struct rxe_pd *pd,
int max_pages, struct rxe_mem *mem);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
int length, enum copy_direction dir, u32 *crcp);

-int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
+int copy_data(struct rxe_pd *pd, int access,
struct rxe_dma_info *dma, void *addr, int length,
enum copy_direction dir, u32 *crcp);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -107,7 +107,7 @@ void rxe_mem_cleanup(struct rxe_pool_entry *arg)
}
}

-static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
+static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
{
int i;
int num_map;
@@ -145,7 +145,7 @@ static int rxe_mem_alloc(struct rxe_dev *rxe, struct rxe_mem *mem, int num_buf)
return -ENOMEM;
}

-int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_dma(struct rxe_pd *pd,
int access, struct rxe_mem *mem)
{
rxe_mem_init(access, mem);
@@ -158,7 +158,7 @@ int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
return 0;
}

-int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
+int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
u64 length, u64 iova, int access, struct ib_udata *udata,
struct rxe_mem *mem)
{
@@ -184,7 +184,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
rxe_mem_init(access, mem);

- err = rxe_mem_alloc(rxe, mem, num_buf);
+ err = rxe_mem_alloc(mem, num_buf);
if (err) {
pr_warn("err %d from rxe_mem_alloc\n", err);
ib_umem_release(umem);
@@ -236,7 +236,7 @@ int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
return err;
}

-int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
+int rxe_mem_init_fast(struct rxe_pd *pd,
int max_pages, struct rxe_mem *mem)
{
int err;
@@ -246,7 +246,7 @@ int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
/* In fastreg, we also set the rkey */
mem->ibmr.rkey = mem->ibmr.lkey;

- err = rxe_mem_alloc(rxe, mem, max_pages);
+ err = rxe_mem_alloc(mem, max_pages);
if (err)
goto err1;
@@ -434,7 +434,6 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
* under the control of a dma descriptor
*/
int copy_data(
- struct rxe_dev *rxe,
struct rxe_pd *pd,
int access,
struct rxe_dma_info *dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -490,7 +490,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
wqe->dma.resid -= paylen;
wqe->dma.sge_offset += paylen;
} else {
- err = copy_data(rxe, qp->pd, 0, &wqe->dma,
+ err = copy_data(qp->pd, 0, &wqe->dma,
payload_addr(pkt), paylen,
from_mem_obj,
&crc);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -511,9 +511,8 @@ static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
int data_len)
{
int err;
- struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

- err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
+ err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
data_addr, data_len, to_mem_obj, NULL);
if (unlikely(err))
return (err == -ENOSPC) ? RESPST_ERR_LENGTH
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1011,7 +1011,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_add_ref(pd);

- err = rxe_mem_init_dma(rxe, pd, access, mr);
+ err = rxe_mem_init_dma(pd, access, mr);
if (err)
goto err2;
@@ -1046,7 +1046,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
rxe_add_ref(pd);

- err = rxe_mem_init_user(rxe, pd, start, length, iova,
+ err = rxe_mem_init_user(pd, start, length, iova,
access, udata, mr);
if (err)
goto err3;
@@ -1094,7 +1094,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
rxe_add_ref(pd);

- err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
+ err = rxe_mem_init_fast(pd, max_num_sg, mr);
if (err)
goto err2;
In the functions rxe_mem_init_dma, rxe_mem_init_user, rxe_mem_init_fast
and copy_data, the function parameter rxe is never used, so remove it.

CC: Srinivas Eeda <srinivas.eeda@oracle.com>
CC: Junxiao Bi <junxiao.bi@oracle.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
---
 drivers/infiniband/sw/rxe/rxe_comp.c  |  6 ++----
 drivers/infiniband/sw/rxe/rxe_loc.h   |  8 ++++----
 drivers/infiniband/sw/rxe/rxe_mr.c    | 13 ++++++-------
 drivers/infiniband/sw/rxe/rxe_req.c   |  2 +-
 drivers/infiniband/sw/rxe/rxe_resp.c  |  3 +--
 drivers/infiniband/sw/rxe/rxe_verbs.c |  6 +++---
 6 files changed, 17 insertions(+), 21 deletions(-)
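
For context: should any of these paths need the device again, it remains
reachable through the protection domain, so the extra parameter stays
unnecessary. A minimal sketch of that lookup (the helper name rxe_from_pd
is hypothetical; it assumes the existing to_rdev() helper and the ibpd
member embedded in struct rxe_pd):

/* Hypothetical helper, not part of this patch: recover the rxe device
 * from a protection domain via the embedded ib_pd's device backlink.
 */
static inline struct rxe_dev *rxe_from_pd(struct rxe_pd *pd)
{
	return to_rdev(pd->ibpd.device);
}

A callee such as copy_data() can therefore derive the device on demand
rather than having an unused argument threaded through every caller.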