@@ -25,6 +25,7 @@ publish_internal_headers(rdma
rdma/rdma_user_rxe.h
+ rdma/rvt-abi.h
rdma/vmw_pvrdma-abi.h
)
publish_internal_headers(rdma/hfi
@@ -65,7 +65,8 @@
#include <infiniband/driver.h>
#include <infiniband/verbs.h>
-
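+/* Map the RDMA_ATOMIC_UAPI() fields in rvt-abi.h to C11 atomics before including it. */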
+#define RDMA_ATOMIC_UAPI(_type, _name) _Atomic(_type) _name
+#include "rdma/rvt-abi.h"
#define PFX "hfi1: "
struct hfi1_device {
@@ -77,39 +78,11 @@ struct hfi1_context {
struct verbs_context ibv_ctx;
};
-/*
- * This structure needs to have the same size and offsets as
- * the kernel's ib_wc structure since it is memory mapped.
- */
-struct hfi1_wc {
- uint64_t wr_id;
- enum ibv_wc_status status;
- enum ibv_wc_opcode opcode;
- uint32_t vendor_err;
- uint32_t byte_len;
- uint32_t imm_data; /* in network byte order */
- uint32_t qp_num;
- uint32_t src_qp;
- enum ibv_wc_flags wc_flags;
- uint16_t pkey_index;
- uint16_t slid;
- uint8_t sl;
- uint8_t dlid_path_bits;
- uint8_t port_num;
-};
-
-struct hfi1_cq_wc {
- _Atomic(uint32_t) head;
- _Atomic(uint32_t) tail;
- struct hfi1_wc queue[1];
-};
-
struct hfi1_cq {
- struct ibv_cq ibv_cq;
- struct hfi1_cq_wc *queue;
- pthread_spinlock_t lock;
+ struct ibv_cq ibv_cq;
+ struct rvt_cq_wc *queue;
+ pthread_spinlock_t lock;
};
-
/*
* Receive work request queue entry.
* The size of the sg_list is determined when the QP is created and stored
@@ -63,10 +63,15 @@
#include <pthread.h>
#include <sys/mman.h>
#include <errno.h>
-
#include "hfiverbs.h"
#include "hfi-abi.h"
+static size_t hfi1_cq_size(int cqe)
+{
+ return sizeof(struct rvt_cq_wc) +
+ sizeof(struct ib_uverbs_wc) * (cqe + 1);
+}
+
int hfi1_query_device(struct ibv_context *context,
struct ibv_device_attr *attr)
{
@@ -186,7 +191,7 @@ struct ibv_cq *hfi1_create_cq(struct ibv_context *context, int cqe,
return NULL;
}
- size = sizeof(struct hfi1_cq_wc) + sizeof(struct hfi1_wc) * cqe;
+ size = hfi1_cq_size(cqe);
cq->queue = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
context->cmd_fd, resp.offset);
if ((void *) cq->queue == MAP_FAILED) {
@@ -231,8 +236,7 @@ int hfi1_resize_cq(struct ibv_cq *ibcq, int cqe)
memset(&resp, 0, sizeof(resp));
pthread_spin_lock(&cq->lock);
/* Save the old size so we can unmmap the queue. */
- size = sizeof(struct hfi1_cq_wc) +
- (sizeof(struct hfi1_wc) * cq->ibv_cq.cqe);
+ size = hfi1_cq_size(cq->ibv_cq.cqe);
ret = ibv_cmd_resize_cq(ibcq, cqe, &cmd, sizeof cmd,
&resp.ibv_resp, sizeof resp);
if (ret) {
@@ -240,8 +244,7 @@ int hfi1_resize_cq(struct ibv_cq *ibcq, int cqe)
return ret;
}
(void) munmap(cq->queue, size);
- size = sizeof(struct hfi1_cq_wc) +
- (sizeof(struct hfi1_wc) * cq->ibv_cq.cqe);
+ size = hfi1_cq_size(cqe);
cq->queue = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
ibcq->context->cmd_fd, resp.offset);
ret = errno;
@@ -269,8 +272,8 @@ int hfi1_destroy_cq(struct ibv_cq *ibcq)
if (ret)
return ret;
- (void) munmap(cq->queue, sizeof(struct hfi1_cq_wc) +
- (sizeof(struct hfi1_wc) * cq->ibv_cq.cqe));
+ (void) munmap(cq->queue, hfi1_cq_size(cq->ibv_cq.cqe));
+
free(cq);
return 0;
}
@@ -288,7 +291,7 @@ int hfi1_destroy_cq_v1(struct ibv_cq *ibcq)
int hfi1_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
{
struct hfi1_cq *cq = to_icq(ibcq);
- struct hfi1_cq_wc *q;
+ struct rvt_cq_wc *q;
int npolled;
uint32_t tail;
@@ -300,7 +303,7 @@ int hfi1_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
break;
/* Make sure entry is read after head index is read. */
atomic_thread_fence(memory_order_acquire);
- memcpy(wc, &q->queue[tail], sizeof(*wc));
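+ /* Completion entries now live in the shared rvt_cq_wc uqueue as struct ib_uverbs_wc. */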
+ memcpy(wc, &q->uqueue[tail], sizeof(*wc));
if (tail == cq->ibv_cq.cqe)
tail = 0;
else