@@ -478,7 +478,7 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
 * From time to time we have to post signalled sends,
 * or the send queue will fill up and only a QP reset can help.
*/
- flags = atomic_inc_return(&con->c.wr_cnt) % sess->queue_depth ?
+ flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
0 : IB_SEND_SIGNALED;
ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
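The modulus is the whole mechanism: every posted WR bumps wr_cnt, and only every signal_interval-th WR carries IB_SEND_SIGNALED, so a single CQE retires a whole batch of unsignaled sends. A minimal standalone sketch of the pattern, with hypothetical demo_* names; only ib_post_send(), IB_SEND_SIGNALED and the atomics are real kernel API:

/*
 * Selective signaling: request a completion for every Nth send so the
 * send queue keeps draining without paying for a CQE per WR.
 */
#include <linux/atomic.h>
#include <rdma/ib_verbs.h>

struct demo_con {
	struct ib_qp *qp;
	atomic_t wr_cnt;		/* total WRs posted on this QP */
	unsigned int signal_interval;	/* request a CQE every Nth WR */
};

static int demo_post_write(struct demo_con *con, struct ib_rdma_wr *wr)
{
	/*
	 * Unsignaled sends produce no CQE, so their send-queue slots are
	 * reclaimed only when a later signaled send completes.  Signaling
	 * every WR is wasteful; signaling none fills the queue until only
	 * a QP reset helps.  Hence: signal exactly every Nth WR.
	 */
	wr->wr.send_flags = atomic_inc_return(&con->wr_cnt) %
			    con->signal_interval ? 0 : IB_SEND_SIGNALED;

	return ib_post_send(con->qp, &wr->wr, NULL);
}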
@@ -680,6 +680,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
case IB_WC_RDMA_WRITE:
/*
* post_send() RDMA write completions of IO reqs (read/write)
+ * and heartbeats.
*/
break;
@@ -1043,7 +1044,7 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
 * From time to time we have to post signalled sends,
 * or the send queue will fill up and only a QP reset can help.
*/
- flags = atomic_inc_return(&con->c.wr_cnt) % sess->queue_depth ?
+ flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
0 : IB_SEND_SIGNALED;
ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
@@ -1849,6 +1850,8 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
return -ENOMEM;
}
sess->queue_depth = queue_depth;
+ sess->s.signal_interval = min_not_zero(queue_depth,
+ (unsigned short)SERVICE_CON_QUEUE_DEPTH);
sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
sess->max_io_size = le32_to_cpu(msg->max_io_size);
sess->flags = le32_to_cpu(msg->flags);
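signal_interval must fit the shallower of the session's send queues: IO connections are sized by the negotiated queue_depth, the service connection by SERVICE_CON_QUEUE_DEPTH, and a shared interval larger than either could fill the smaller queue between signaled sends. A hedged illustration of the min_not_zero() semantics behind this pick, with a made-up stand-in constant; the cast matters because min_not_zero(), like min(), rejects mismatched operand types at compile time (queue_depth arrives as a u16, while an enum constant promotes to int):

#include <linux/minmax.h>

#define DEMO_SERVICE_DEPTH 16	/* stand-in, not the real constant's value */

static unsigned short demo_pick_interval(unsigned short queue_depth)
{
	/*
	 * queue_depth 8  -> 8  (the smaller non-zero bound wins)
	 * queue_depth 64 -> 16 (capped by the service-queue depth)
	 * queue_depth 0  -> 16 (a zero operand is ignored; zero is
	 *                       returned only if both sides are zero)
	 */
	return min_not_zero(queue_depth, (unsigned short)DEMO_SERVICE_DEPTH);
}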
@@ -109,6 +109,7 @@ struct rtrs_sess {
unsigned int con_num;
unsigned int irq_con_num;
unsigned int recon_cnt;
+ unsigned int signal_interval;
struct rtrs_ib_dev *dev;
int dev_ref;
struct ib_cqe *hb_cqe;
@@ -201,7 +201,6 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
struct rtrs_srv_sess *sess = to_srv_sess(s);
dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
struct rtrs_srv_mr *srv_mr;
- struct rtrs_srv *srv = sess->srv;
struct ib_send_wr inv_wr;
struct ib_rdma_wr imm_wr;
struct ib_rdma_wr *wr = NULL;
@@ -269,7 +268,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
 * From time to time we have to post signaled sends,
 * or the send queue will fill up and only a QP reset can help.
*/
- flags = (atomic_inc_return(&id->con->c.wr_cnt) % srv->queue_depth) ?
+ flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ?
0 : IB_SEND_SIGNALED;
if (need_inval) {
@@ -347,7 +346,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
struct ib_send_wr inv_wr, *wr = NULL;
struct ib_rdma_wr imm_wr;
struct ib_reg_wr rwr;
- struct rtrs_srv *srv = sess->srv;
struct rtrs_srv_mr *srv_mr;
bool need_inval = false;
enum ib_send_flags flags;
@@ -396,7 +394,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
 * From time to time we have to post signalled sends,
 * or the send queue will fill up and only a QP reset can help.
*/
- flags = (atomic_inc_return(&con->c.wr_cnt) % srv->queue_depth) ?
+ flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ?
0 : IB_SEND_SIGNALED;
imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
imm_wr.wr.next = NULL;
@@ -1268,8 +1266,9 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
case IB_WC_SEND:
/*
* post_send() RDMA write completions of IO reqs (read/write)
+ * and heartbeats.
*/
- atomic_add(srv->queue_depth, &con->sq_wr_avail);
+ atomic_add(s->signal_interval, &con->sq_wr_avail);
if (unlikely(!list_empty_careful(&con->rsp_wr_wait_list)))
rtrs_rdma_process_wr_wait_list(con);
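Since a CQE now arrives every signal_interval WRs instead of every queue_depth WRs, the credit refund on a signaled completion shrinks to match. A hedged sketch of that accounting with hypothetical demo_* names (rtrs's real bookkeeping around sq_wr_avail differs in detail): one credit per posted WR, signal_interval credits back per CQE, because one signaled completion proves that many send-queue slots have drained:

#include <linux/atomic.h>

struct demo_srv_con {
	atomic_t sq_wr_avail;		/* free send-queue slots */
	unsigned int signal_interval;	/* WRs per signaled completion */
};

static bool demo_try_post(struct demo_srv_con *con)
{
	/* take one credit; refuse (and let the caller defer) when empty */
	if (atomic_dec_if_positive(&con->sq_wr_avail) < 0)
		return false;
	/* ... build the WR and ib_post_send() it here ... */
	return true;
}

static void demo_send_done(struct demo_srv_con *con)
{
	/* one CQE == signal_interval WRs retired from the send queue */
	atomic_add(con->signal_interval, &con->sq_wr_avail);
}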
@@ -1659,6 +1658,8 @@ static int create_con(struct rtrs_srv_sess *sess,
max_send_wr = min_t(int, wr_limit,
SERVICE_CON_QUEUE_DEPTH * 2 + 2);
max_recv_wr = max_send_wr;
+ s->signal_interval = min_not_zero(srv->queue_depth,
+ (size_t)SERVICE_CON_QUEUE_DEPTH);
} else {
/* when always_invalidate is enabled, we need linv+rinv+mr+imm */
if (always_invalidate)
@@ -187,10 +187,15 @@ int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_cqe *cqe,
struct ib_send_wr *head)
{
struct ib_rdma_wr wr;
+ struct rtrs_sess *sess = con->sess;
+ enum ib_send_flags sflags;
+
+ sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ?
+ 0 : IB_SEND_SIGNALED;
wr = (struct ib_rdma_wr) {
.wr.wr_cqe = cqe,
- .wr.send_flags = flags,
+ .wr.send_flags = sflags,
.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
.wr.ex.imm_data = cpu_to_be32(imm_data),
};