@@ -54,7 +54,8 @@ enum bnxt_re_wr_opcode {
BNXT_RE_WR_OPCD_ATOMIC_FA = 0x0B,
BNXT_RE_WR_OPCD_LOC_INVAL = 0x0C,
BNXT_RE_WR_OPCD_BIND = 0x0E,
- BNXT_RE_WR_OPCD_RECV = 0x80
+ BNXT_RE_WR_OPCD_RECV = 0x80,
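+ /* Sentinel returned for unsupported verbs opcodes; rejected before posting */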
+ BNXT_RE_WR_OPCD_INVAL = 0xFF
};

enum bnxt_re_wr_flags {
@@ -236,9 +236,15 @@ static inline uint8_t bnxt_re_ibv_to_bnxt_wr_opcd(uint8_t ibv_opcd)
case IBV_WR_RDMA_READ:
bnxt_opcd = BNXT_RE_WR_OPCD_RDMA_READ;
break;
+ case IBV_WR_ATOMIC_CMP_AND_SWP:
+ bnxt_opcd = BNXT_RE_WR_OPCD_ATOMIC_CS;
+ break;
+ case IBV_WR_ATOMIC_FETCH_AND_ADD:
+ bnxt_opcd = BNXT_RE_WR_OPCD_ATOMIC_FA;
+ break;
/* TODO: Add other opcodes */
default:
- bnxt_opcd = 0xFF;
+ bnxt_opcd = BNXT_RE_WR_OPCD_INVAL;
break;
};
@@ -83,6 +83,16 @@ static inline void iowrite32(__u32 *dst, __le32 *src)
*(volatile __le32 *)dst = *src;
}
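
+/* Return the high 32 bits of a 64-bit value. The split shift mirrors
+ * the kernel helper of the same name.
+ */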
+static inline __u32 upper_32_bits(uint64_t n)
+{
+ return (__u32)((n >> 16) >> 16);
+}
+
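+/* Return the low 32 bits of a 64-bit value */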
+static inline __u32 lower_32_bits(uint64_t n)
+{
+ return (__u32)(n & 0xFFFFFFFFUL);
+}
+
/* Basic queue operation */
static inline uint32_t bnxt_re_is_que_full(struct bnxt_re_queue *que)
{
@@ -1068,6 +1068,8 @@ static int bnxt_re_build_send_sqe(struct bnxt_re_qp *qp, void *wqe,
/* Fill Header */
opcode = bnxt_re_ibv_to_bnxt_wr_opcd(wr->opcode);
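+ /* Bail out before writing a header with an unmapped opcode */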
+ if (opcode == BNXT_RE_WR_OPCD_INVAL)
+ return -EINVAL;
hdrval = (opcode & BNXT_RE_HDR_WT_MASK);
if (is_inline) {
@@ -1115,6 +1117,39 @@ static int bnxt_re_build_rdma_sqe(struct bnxt_re_qp *qp, void *wqe,
return len;
}
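
+/* Build an atomic compare-and-swap SQE: reuse the common send SQE
+ * build, then fill in the atomic segment that follows the base header.
+ * Callers reach this through the normal verbs path, i.e. a send WR
+ * with wr.opcode == IBV_WR_ATOMIC_CMP_AND_SWP and wr.wr.atomic set.
+ */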
+static int bnxt_re_build_cns_sqe(struct bnxt_re_qp *qp, void *wqe,
+ struct ibv_send_wr *wr)
+{
+ struct bnxt_re_bsqe *hdr = wqe;
+ struct bnxt_re_atomic *sqe = ((void *)wqe +
+ sizeof(struct bnxt_re_bsqe));
+ int len;
+
+ len = bnxt_re_build_send_sqe(qp, wqe, wr, false);
+ hdr->key_immd = htole32(wr->wr.atomic.rkey);
+ sqe->rva = htole64(wr->wr.atomic.remote_addr);
+ sqe->cmp_dt = htole64(wr->wr.atomic.compare_add);
+ sqe->swp_dt = htole64(wr->wr.atomic.swap);
+
+ return len;
+}
+
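+/* Fetch-and-add: same layout as compare-and-swap; the add operand is
+ * carried in cmp_dt and there is no swap data.
+ */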
+static int bnxt_re_build_fna_sqe(struct bnxt_re_qp *qp, void *wqe,
+ struct ibv_send_wr *wr)
+{
+ struct bnxt_re_bsqe *hdr = wqe;
+ struct bnxt_re_atomic *sqe = ((void *)wqe +
+ sizeof(struct bnxt_re_bsqe));
+ int len;
+
+ len = bnxt_re_build_send_sqe(qp, wqe, wr, false);
+ hdr->key_immd = htole32(wr->wr.atomic.rkey);
+ sqe->rva = htole64(wr->wr.atomic.remote_addr);
+ sqe->cmp_dt = htole64(wr->wr.atomic.compare_add);
+
+ return len;
+}
+
int bnxt_re_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
struct ibv_send_wr **bad)
{
@@ -1168,27 +1203,28 @@ int bnxt_re_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
else
bytes = bnxt_re_build_send_sqe(qp, sqe, wr,
is_inline);
- if (bytes < 0)
- ret = (bytes == -EINVAL) ? EINVAL : ENOMEM;
break;
case IBV_WR_RDMA_WRITE_WITH_IMM:
hdr->key_immd = htole32(be32toh(wr->imm_data));
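+ /* fall through */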
case IBV_WR_RDMA_WRITE:
bytes = bnxt_re_build_rdma_sqe(qp, sqe, wr, is_inline);
- if (bytes < 0)
- ret = ENOMEM;
break;
case IBV_WR_RDMA_READ:
bytes = bnxt_re_build_rdma_sqe(qp, sqe, wr, false);
- if (bytes < 0)
- ret = ENOMEM;
+ break;
+ case IBV_WR_ATOMIC_CMP_AND_SWP:
+ bytes = bnxt_re_build_cns_sqe(qp, sqe, wr);
+ break;
+ case IBV_WR_ATOMIC_FETCH_AND_ADD:
+ bytes = bnxt_re_build_fna_sqe(qp, sqe, wr);
break;
default:
- ret = EINVAL;
+ bytes = -EINVAL;
break;
}
- if (ret) {
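+ /* Builders return negative errno values; convert for the verbs API */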
+ if (bytes < 0) {
+ ret = (bytes == -EINVAL) ? EINVAL : ENOMEM;
*bad = wr;
break;
}