@@ -642,6 +642,36 @@ static int qp_idle(struct rvt_qp *qp)
}
/**
+ * ib_cq_tail - Return tail index of cq buffer
+ * @send_cq: The send completion queue
+ *
+ * This is called in qp_iter_print to get the tail
+ * of the cq buffer.
+ */
+static u32 ib_cq_tail(struct ib_cq *send_cq)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
+
+ return cq->ip ? RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
+ cq->kqueue->tail;
+}
+
+/**
+ * ib_cq_head - Return head index of cq buffer
+ * @send_cq: The send completion queue
+ *
+ * This is called in qp_iter_print to get the head
+ * of the cq buffer.
+ */
+static u32 ib_cq_head(struct ib_cq *send_cq)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
+
+ return cq->ip ? RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
+ cq->kqueue->head;
+}
+
+/**
* qp_iter_print - print the qp information to seq_file
* @s: the seq_file to emit the qp information on
* @iter: the iterator for the qp hash list
@@ -701,8 +731,8 @@ void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
sde ? sde->this_idx : 0,
send_context,
send_context ? send_context->sw_index : 0,
- ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
- ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
+ ib_cq_head(qp->ibqp.send_cq),
+ ib_cq_tail(qp->ibqp.send_cq),
qp->pid,
qp->s_state,
qp->s_ack_state,
@@ -63,19 +63,33 @@
*/
void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
- struct rvt_cq_wc *wc;
+ struct ib_uverbs_wc *uqueue = NULL;
+ struct ib_wc *kqueue = NULL;
+ struct rvt_cq_wc *u_wc = NULL;
+ struct rvt_k_cq_wc *k_wc = NULL;
unsigned long flags;
u32 head;
u32 next;
+ u32 tail;
spin_lock_irqsave(&cq->lock, flags);
+ if (cq->ip) {
+ u_wc = cq->queue;
+ uqueue = &u_wc->uqueue[0];
+ head = RDMA_READ_UAPI_ATOMIC(u_wc->head);
+ tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);
+ } else {
+ k_wc = cq->kqueue;
+ kqueue = &k_wc->kqueue[0];
+ head = k_wc->head;
+ tail = k_wc->tail;
+ }
+
/*
- * Note that the head pointer might be writable by user processes.
- * Take care to verify it is a sane value.
+ * Note that the head pointer might be writable by
+ * user processes. Take care to verify it is a sane value.
*/
- wc = cq->queue;
- head = wc->head;
if (head >= (unsigned)cq->ibcq.cqe) {
head = cq->ibcq.cqe;
next = 0;
@@ -83,7 +97,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
next = head + 1;
}
- if (unlikely(next == wc->tail)) {
+ if (unlikely(next == tail)) {
spin_unlock_irqrestore(&cq->lock, flags);
if (cq->ibcq.event_handler) {
struct ib_event ev;
@@ -96,27 +110,27 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
return;
}
trace_rvt_cq_enter(cq, entry, head);
- if (cq->ip) {
- wc->uqueue[head].wr_id = entry->wr_id;
- wc->uqueue[head].status = entry->status;
- wc->uqueue[head].opcode = entry->opcode;
- wc->uqueue[head].vendor_err = entry->vendor_err;
- wc->uqueue[head].byte_len = entry->byte_len;
- wc->uqueue[head].ex.imm_data = entry->ex.imm_data;
- wc->uqueue[head].qp_num = entry->qp->qp_num;
- wc->uqueue[head].src_qp = entry->src_qp;
- wc->uqueue[head].wc_flags = entry->wc_flags;
- wc->uqueue[head].pkey_index = entry->pkey_index;
- wc->uqueue[head].slid = ib_lid_cpu16(entry->slid);
- wc->uqueue[head].sl = entry->sl;
- wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
- wc->uqueue[head].port_num = entry->port_num;
+ if (uqueue) {
+ uqueue[head].wr_id = entry->wr_id;
+ uqueue[head].status = entry->status;
+ uqueue[head].opcode = entry->opcode;
+ uqueue[head].vendor_err = entry->vendor_err;
+ uqueue[head].byte_len = entry->byte_len;
+ uqueue[head].ex.imm_data = entry->ex.imm_data;
+ uqueue[head].qp_num = entry->qp->qp_num;
+ uqueue[head].src_qp = entry->src_qp;
+ uqueue[head].wc_flags = entry->wc_flags;
+ uqueue[head].pkey_index = entry->pkey_index;
+ uqueue[head].slid = ib_lid_cpu16(entry->slid);
+ uqueue[head].sl = entry->sl;
+ uqueue[head].dlid_path_bits = entry->dlid_path_bits;
+ uqueue[head].port_num = entry->port_num;
/* Make sure entry is written before the head index. */
- smp_wmb();
+ RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next);
} else {
- wc->kqueue[head] = *entry;
+ kqueue[head] = *entry;
+ k_wc->head = next;
}
- wc->head = next;
if (cq->notify == IB_CQ_NEXT_COMP ||
(cq->notify == IB_CQ_SOLICITED &&
@@ -183,7 +197,8 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
{
struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
struct rvt_cq *cq;
- struct rvt_cq_wc *wc;
+ struct rvt_cq_wc *u_wc = NULL;
+ struct rvt_k_cq_wc *k_wc = NULL;
struct ib_cq *ret;
u32 sz;
unsigned int entries = attr->cqe;
@@ -212,17 +227,22 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
* We need to use vmalloc() in order to support mmap and large
* numbers of entries.
*/
- sz = sizeof(*wc);
- if (udata && udata->outlen >= sizeof(__u64))
- sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
- else
- sz += sizeof(struct ib_wc) * (entries + 1);
- wc = udata ?
- vmalloc_user(sz) :
- vzalloc_node(sz, rdi->dparms.node);
- if (!wc) {
- ret = ERR_PTR(-ENOMEM);
- goto bail_cq;
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ sz = sizeof(struct ib_uverbs_wc) * (entries + 1);
+ sz += sizeof(*u_wc);
+ u_wc = vmalloc_user(sz);
+ if (!u_wc) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_cq;
+ }
+ } else {
+ sz = sizeof(struct ib_wc) * (entries + 1);
+ sz += sizeof(*k_wc);
+ k_wc = vzalloc_node(sz, rdi->dparms.node);
+ if (!k_wc) {
+ ret = ERR_PTR(-ENOMEM);
+ goto bail_cq;
+ }
}
/*
@@ -232,7 +252,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
if (udata && udata->outlen >= sizeof(__u64)) {
int err;
- cq->ip = rvt_create_mmap_info(rdi, sz, context, wc);
+ cq->ip = rvt_create_mmap_info(rdi, sz, context, u_wc);
if (!cq->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wc;
@@ -279,7 +299,10 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
cq->notify = RVT_CQ_NONE;
spin_lock_init(&cq->lock);
INIT_WORK(&cq->comptask, send_complete);
- cq->queue = wc;
+ if (u_wc)
+ cq->queue = u_wc;
+ else
+ cq->kqueue = k_wc;
ret = &cq->ibcq;
@@ -289,7 +312,8 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
bail_ip:
kfree(cq->ip);
bail_wc:
- vfree(wc);
+ vfree(u_wc);
+ vfree(k_wc);
bail_cq:
kfree(cq);
done:
@@ -346,9 +370,16 @@ int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
- if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
- cq->queue->head != cq->queue->tail)
- ret = 1;
+ if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+ if (cq->queue) {
+ if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
+ RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
+ ret = 1;
+ } else {
+ if (cq->kqueue->head != cq->kqueue->tail)
+ ret = 1;
+ }
+ }
spin_unlock_irqrestore(&cq->lock, flags);
@@ -364,12 +395,14 @@ int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
- struct rvt_cq_wc *old_wc;
- struct rvt_cq_wc *wc;
u32 head, tail, n;
int ret;
u32 sz;
struct rvt_dev_info *rdi = cq->rdi;
+ struct rvt_cq_wc *u_wc = NULL;
+ struct rvt_cq_wc *old_u_wc = NULL;
+ struct rvt_k_cq_wc *k_wc = NULL;
+ struct rvt_k_cq_wc *old_k_wc = NULL;
if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
return -EINVAL;
@@ -377,17 +410,19 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
/*
* Need to use vmalloc() if we want to support large #s of entries.
*/
- sz = sizeof(*wc);
- if (udata && udata->outlen >= sizeof(__u64))
- sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
- else
- sz += sizeof(struct ib_wc) * (cqe + 1);
- wc = udata ?
- vmalloc_user(sz) :
- vzalloc_node(sz, rdi->dparms.node);
- if (!wc)
- return -ENOMEM;
-
+ if (udata && udata->outlen >= sizeof(__u64)) {
+ sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
+ sz += sizeof(*u_wc);
+ u_wc = vmalloc_user(sz);
+ if (!u_wc)
+ return -ENOMEM;
+ } else {
+ sz = sizeof(struct ib_wc) * (cqe + 1);
+ sz += sizeof(*k_wc);
+ k_wc = vzalloc_node(sz, rdi->dparms.node);
+ if (!k_wc)
+ return -ENOMEM;
+ }
/* Check that we can write the offset to mmap. */
if (udata && udata->outlen >= sizeof(__u64)) {
__u64 offset = 0;
@@ -402,11 +437,18 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
* Make sure head and tail are sane since they
* might be user writable.
*/
- old_wc = cq->queue;
- head = old_wc->head;
+ if (u_wc) {
+ old_u_wc = cq->queue;
+ head = RDMA_READ_UAPI_ATOMIC(old_u_wc->head);
+ tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail);
+ } else {
+ old_k_wc = cq->kqueue;
+ head = old_k_wc->head;
+ tail = old_k_wc->tail;
+ }
+
if (head > (u32)cq->ibcq.cqe)
head = (u32)cq->ibcq.cqe;
- tail = old_wc->tail;
if (tail > (u32)cq->ibcq.cqe)
tail = (u32)cq->ibcq.cqe;
if (head < tail)
@@ -418,27 +460,36 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
goto bail_unlock;
}
for (n = 0; tail != head; n++) {
- if (cq->ip)
- wc->uqueue[n] = old_wc->uqueue[tail];
+ if (u_wc)
+ u_wc->uqueue[n] = old_u_wc->uqueue[tail];
else
- wc->kqueue[n] = old_wc->kqueue[tail];
+ k_wc->kqueue[n] = old_k_wc->kqueue[tail];
if (tail == (u32)cq->ibcq.cqe)
tail = 0;
else
tail++;
}
cq->ibcq.cqe = cqe;
- wc->head = n;
- wc->tail = 0;
- cq->queue = wc;
+ if (u_wc) {
+ RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n);
+ RDMA_WRITE_UAPI_ATOMIC(u_wc->tail, 0);
+ cq->queue = u_wc;
+ } else {
+ k_wc->head = n;
+ k_wc->tail = 0;
+ cq->kqueue = k_wc;
+ }
spin_unlock_irq(&cq->lock);
- vfree(old_wc);
+ if (u_wc)
+ vfree(old_u_wc);
+ else
+ vfree(old_k_wc);
if (cq->ip) {
struct rvt_mmap_info *ip = cq->ip;
- rvt_update_mmap_info(rdi, ip, sz, wc);
+ rvt_update_mmap_info(rdi, ip, sz, u_wc);
/*
* Return the offset to mmap.
@@ -462,7 +513,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
bail_unlock:
spin_unlock_irq(&cq->lock);
bail_free:
- vfree(wc);
+ vfree(u_wc);
+ vfree(k_wc);
+
return ret;
}
@@ -480,7 +533,7 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
- struct rvt_cq_wc *wc;
+ struct rvt_k_cq_wc *wc;
unsigned long flags;
int npolled;
u32 tail;
@@ -491,7 +544,7 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
spin_lock_irqsave(&cq->lock, flags);
- wc = cq->queue;
+ wc = cq->kqueue;
tail = wc->tail;
if (tail > (u32)cq->ibcq.cqe)
tail = (u32)cq->ibcq.cqe;
@@ -59,20 +59,17 @@
* notifications are armed.
*/
#define RVT_CQ_NONE (IB_CQ_NEXT_COMP + 1)
+#include <rdma/rvt-abi.h>
/*
- * This structure is used to contain the head pointer, tail pointer,
- * and completion queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
+ * This structure is used to contain the head pointer, tail pointer,
+ * and completion queue entries for the kernel-only CQ; the variant
+ * that is mmap'ed into user space is struct rvt_cq_wc in rvt-abi.h.
*/
-struct rvt_cq_wc {
+struct rvt_k_cq_wc {
u32 head; /* index of next entry to fill */
u32 tail; /* index of next ib_poll_cq() entry */
- union {
- /* these are actually size ibcq.cqe + 1 */
- struct ib_uverbs_wc uqueue[0];
- struct ib_wc kqueue[0];
- };
+ struct ib_wc kqueue[];
};
/*
@@ -88,6 +85,7 @@ struct rvt_cq {
struct rvt_dev_info *rdi;
struct rvt_cq_wc *queue;
struct rvt_mmap_info *ip;
+ struct rvt_k_cq_wc *kqueue;
};
static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq)
new file mode 100644
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+
+/*
+ * This file contains defines, structures, etc. that are used
+ * to communicate between kernel and user code.
+ */
+
+#ifndef RVT_ABI_USER_H
+#define RVT_ABI_USER_H
+
+#include <linux/types.h>
+#include <rdma/ib_user_verbs.h>
+#ifndef RDMA_ATOMIC_UAPI
+#define RDMA_ATOMIC_UAPI(_type, _name) struct { _type val; } _name
+#endif
+#define RDMA_READ_UAPI_ATOMIC(member) smp_load_acquire(&(member).val)
+#define RDMA_WRITE_UAPI_ATOMIC(member, x) smp_store_release(&(member).val, x)
+/*
+ * This structure is used to contain the head pointer, tail pointer,
+ * and completion queue entries as a single memory allocation so
+ * it can be mmap'ed into user space.
+ */
+struct rvt_cq_wc {
+ /* index of next entry to fill */
+ RDMA_ATOMIC_UAPI(__u32, head);
+ /* index of next ib_poll_cq() entry */
+ RDMA_ATOMIC_UAPI(__u32, tail);
+
+ /* these are actually size ibcq.cqe + 1 */
+ struct ib_uverbs_wc uqueue[];
+};
+
+#endif /* RVT_ABI_USER_H */
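
For illustration, the sketch below shows how a user-space provider could consume completions from the mmap'ed ring described by struct rvt_cq_wc. It is not part of the patch: the UAPI_LOAD_ACQ/UAPI_STORE_REL macros and cq_ring_pop() are hypothetical names, and a real provider (e.g. in rdma-core) would obtain the mapping and the cqe value from its create-CQ path. Only the structure layout and the pairing with the kernel's store-release on head (RDMA_WRITE_UAPI_ATOMIC in rvt_cq_enter()) come from the code above.

/*
 * Minimal, illustrative user-space consumer of the mmap'ed CQ ring.
 * The kernel writes uqueue[head] and then publishes head with a
 * store-release; user space owns tail.  The kernel's smp_* helpers are
 * not available here, so GCC/Clang __atomic builtins stand in for them.
 */
#include <stdbool.h>
#include <rdma/rvt-abi.h>       /* struct rvt_cq_wc, struct ib_uverbs_wc */

/* user-space stand-ins for the kernel's acquire/release accessors */
#define UAPI_LOAD_ACQ(m)        __atomic_load_n(&(m).val, __ATOMIC_ACQUIRE)
#define UAPI_STORE_REL(m, x)    __atomic_store_n(&(m).val, (x), __ATOMIC_RELEASE)

/* Pop one completion from the shared ring; false means the ring is empty. */
static bool cq_ring_pop(struct rvt_cq_wc *q, __u32 cqe,
                        struct ib_uverbs_wc *wc)
{
        __u32 tail = UAPI_LOAD_ACQ(q->tail);
        __u32 head = UAPI_LOAD_ACQ(q->head);

        if (tail > cqe)         /* defend against a corrupted index */
                tail = cqe;
        if (head == tail)       /* kernel has not posted anything new */
                return false;

        /*
         * Safe to copy the entry: the acquire on head pairs with the
         * kernel's store-release, so uqueue[tail] is fully written.
         */
        *wc = q->uqueue[tail];
        UAPI_STORE_REL(q->tail, tail == cqe ? 0 : tail + 1);
        return true;
}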