@@ -104,6 +104,10 @@ enum {
XSC_MAX_NAME_LEN = 32,
};
+/* Size of the fixed name buffer in struct xsc_eq (includes the NUL) */
+enum {
+	XSC_MAX_EQ_NAME = 20
+};
+
enum {
XSC_MAX_PORTS = 2,
};
@@ -118,9 +122,160 @@ enum {
XSC_MAX_UUARS = XSC_MAX_UAR_PAGES * XSC_BF_REGS_PER_PAGE,
};
+/* Buffer-allocator structures (implemented in alloc.c) */
+
+/* One DMA-mapped chunk: kernel virtual address plus its bus address */
+struct xsc_buf_list {
+	void *buf;
+	dma_addr_t map;
+};
+
+/* General-purpose buffer: either one physically contiguous area
+ * (direct, nbufs == 1) or a list of individually mapped pages
+ * (page_list).  See xsc_buf_offset() for how a byte offset is
+ * resolved in either layout.
+ */
+struct xsc_buf {
+	struct xsc_buf_list direct;
+	struct xsc_buf_list *page_list;
+	int nbufs;	/* number of mapped chunks */
+	int npages;
+	int page_shift;
+	int size;	/* total size in bytes */
+};
+
+/* Fragmented buffer: presumably npages fragments of (1 << page_shift)
+ * bytes each, size bytes total — no user visible in this patch, confirm
+ * against the allocator when it lands.
+ */
+struct xsc_frag_buf {
+	struct xsc_buf_list *frags;
+	int npages;
+	int size;
+	u8 page_shift;
+};
+
+/* Cached control data for walking a fragmented work queue.  The *_m1
+ * fields look like power-of-two-minus-one masks matching the log_*
+ * exponents; NOTE(review): no code in this patch populates or reads
+ * this struct — verify the invariants against the WQ setup code.
+ */
+struct xsc_frag_buf_ctrl {
+	struct xsc_buf_list *frags;
+	u32 sz_m1;
+	u16 frag_sz_m1;
+	u16 strides_offset;
+	u8 log_sz;
+	u8 log_stride;
+	u8 log_frag_strides;
+};
+
+/* QP/WQE definitions (implemented in qp.c) */
+
+/* Hardware send WQE control segment (device wire format).
+ * NOTE(review): bit-fields over __le32/__le16 have compiler- and
+ * endianness-dependent layout; confirm this matches the device's
+ * expectation on big-endian hosts before restructuring.
+ */
+struct xsc_send_wqe_ctrl_seg {
+	__le32		msg_opcode:8;
+	__le32		with_immdt:1;
+	__le32		csum_en:2;
+	__le32		ds_data_num:5;
+	__le32		wqe_id:16;
+	__le32		msg_len;
+	union {
+		__le32		opcode_data;
+		struct {
+			u8	has_pph:1;
+			u8	so_type:1;
+			__le16	so_data_size:14;
+			u8:8;
+			u8	so_hdr_len:8;
+		};
+		struct {
+			__le16	desc_id;
+			__le16	is_last_wqe:1;
+			__le16	dst_qp_id:15;
+		};
+	};
+	__le32		se:1;
+	__le32		ce:1;
+	__le32:30;
+};
+
+/* Hardware WQE data segment: either a scatter/gather entry
+ * (seg_len/mkey/va) or up to 15 bytes of inline data when in_line is
+ * set.  Same bit-field portability caveat as the control segment.
+ */
+struct xsc_wqe_data_seg {
+	union {
+		__le32		in_line:1;
+		struct {
+			__le32:1;
+			__le32	seg_len:31;
+			__le32	mkey;
+			__le64	va;
+		};
+		struct {
+			__le32:1;
+			__le32	len:7;
+			u8	in_line_data[15];
+		};
+	};
+};
+
+/* Core QP state tracked by the driver.
+ * Lifetime: refcount starts at 1 when the QP is published in the qp
+ * table; xsc_qp_event() takes a temporary reference while invoking
+ * ->event, and 'free' is completed when the last reference drops
+ * (see qp.c).
+ */
+struct xsc_core_qp {
+	/* async event callback, invoked from xsc_qp_event() */
+	void (*event)(struct xsc_core_qp *qp, int type);
+	int qpn;	/* QP number; key in the qp table radix tree */
+	atomic_t refcount;
+	struct completion free;
+	int pid;	/* pid of the creating task */
+	u16 qp_type;
+	u16 eth_queue_type;
+	u16 qp_type_internal;
+	u16 grp_id;
+	u8 mac_id;
+};
+
+/* qpn -> struct xsc_core_qp lookup table, used by the async event path */
+struct xsc_qp_table {
+	spinlock_t lock; /* protects the radix tree */
+	struct radix_tree_root tree;
+};
+
+/* CQ definitions (implemented in cq.c) */
+
+/* Async event codes passed to the QP/CQ ->event callbacks */
+enum xsc_event {
+	XSC_EVENT_TYPE_COMP = 0x0,
+	XSC_EVENT_TYPE_COMM_EST = 0x02,	/* communication established (MAD) */
+	XSC_EVENT_TYPE_CQ_ERROR = 0x04,
+	XSC_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
+	XSC_EVENT_TYPE_INTERNAL_ERROR = 0x08,
+	XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,	/* maps to IBV_EVENT_QP_REQ_ERR */
+	XSC_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,	/* maps to IBV_EVENT_QP_ACCESS_ERR */
+};
+
+/* Core CQ state tracked by the driver.
+ * Lifetime mirrors struct xsc_core_qp: refcount/free coordinate
+ * teardown with the event path (see xsc_cq_event() in cq.c).
+ */
+struct xsc_core_cq {
+	u32 cqn;	/* CQ number; key in the cq table radix tree */
+	int cqe_sz;
+	u64 arm_db;	/* presumably arm doorbell location — confirm against hw setup */
+	u64 ci_db;	/* presumably consumer-index doorbell — confirm */
+	struct xsc_core_device *dev;
+	atomic_t refcount;
+	struct completion free;
+	unsigned int vector;
+	int irqn;
+	u16 dim_us;	/* NOTE(review): likely interrupt-moderation params — verify */
+	u16 dim_pkts;
+	/* completion callback */
+	void (*comp)(struct xsc_core_cq *cq);
+	/* async event callback, invoked from xsc_cq_event() */
+	void (*event)(struct xsc_core_cq *cq, enum xsc_event);
+	u32 cons_index;
+	unsigned int arm_sn;
+	int pid;	/* pid of the creating task */
+	u32 reg_next_cid;
+	u32 reg_done_pid;
+	struct xsc_eq *eq;
+};
+
+/* cqn -> struct xsc_core_cq lookup table, used by the async event path */
+struct xsc_cq_table {
+	spinlock_t lock; /* protects the radix tree */
+	struct radix_tree_root tree;
+};
+
+/* Event queue state.
+ * NOTE(review): this embeds its own cq_table while xsc_dev_resource
+ * also holds one — confirm which table the completion path consults,
+ * or whether one of the two is redundant.
+ */
+struct xsc_eq {
+	struct xsc_core_device *dev;
+	struct xsc_cq_table cq_table;
+	u32 doorbell;	/* offset from the start of BAR0/2 register space */
+	u32 cons_index;
+	struct xsc_buf buf;
+	int size;
+	unsigned int irqn;
+	u16 eqn;	/* EQ number */
+	int nent;	/* number of entries */
+	cpumask_var_t mask;
+	char name[XSC_MAX_EQ_NAME];	/* presumably the IRQ name — confirm */
+	struct list_head list;
+	int index;
+};
+
 struct xsc_dev_resource {
-	struct mutex alloc_mutex; /* protect buffer alocation according to numa node */
-	int numa_node;
+	struct xsc_qp_table qp_table;	/* active QPs, indexed by qpn */
+	struct xsc_cq_table cq_table;	/* active CQs, indexed by cqn */
+
+	struct mutex alloc_mutex; /* protects buffer allocation */
 };
struct xsc_reg_addr {
@@ -302,11 +457,18 @@ struct xsc_core_device {
u8 fw_version_extra_flag;
};
-void xsc_free_board_info(void);
-int xsc_cmd_query_hca_cap(struct xsc_core_device *dev,
- struct xsc_caps *caps);
-int xsc_query_guid(struct xsc_core_device *dev);
-int xsc_activate_hw_config(struct xsc_core_device *dev);
-int xsc_reset_function_resource(struct xsc_core_device *dev);
+int xsc_core_create_resource_common(struct xsc_core_device *xdev,
+ struct xsc_core_qp *qp);
+void xsc_core_destroy_resource_common(struct xsc_core_device *xdev,
+ struct xsc_core_qp *qp);
+
+/* Return a kernel virtual pointer to byte @offset inside @buf.
+ * On 64-bit hosts, or when the buffer is a single chunk, the buffer is
+ * contiguous and the offset is applied directly; otherwise it is
+ * resolved through the per-page list.
+ */
+static inline void *xsc_buf_offset(struct xsc_buf *buf, int offset)
+{
+	if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
+		return buf->direct.buf + offset;
+	else
+		return buf->page_list[offset >> PAGE_SHIFT].buf +
+			(offset & (PAGE_SIZE - 1));
+}
#endif
@@ -6,4 +6,5 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc
obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc_pci.o
-xsc_pci-y := main.o cmdq.o hw.o
+xsc_pci-y := main.o cmdq.o hw.o qp.o cq.o
new file mode 100644
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include "common/xsc_core.h"
+#include "cq.h"
+
+/* Dispatch an async event to the CQ identified by @cqn.
+ * A temporary reference is taken under the table lock so the CQ cannot
+ * be freed while ->event runs; dropping the last reference completes
+ * ->free so a concurrent destroyer can finish.
+ * NOTE(review): assumes ->event is non-NULL for every registered CQ —
+ * confirm this is guaranteed at CQ creation.
+ */
+void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type)
+{
+	struct xsc_cq_table *table = &xdev->dev_res->cq_table;
+	struct xsc_core_cq *cq;
+
+	spin_lock(&table->lock);
+
+	cq = radix_tree_lookup(&table->tree, cqn);
+	if (cq)
+		atomic_inc(&cq->refcount);
+
+	spin_unlock(&table->lock);
+
+	if (!cq) {
+		xsc_core_warn(xdev, "Async event for bogus CQ 0x%x\n", cqn);
+		return;
+	}
+
+	/* callback runs outside the lock; the reference keeps cq alive */
+	cq->event(cq, event_type);
+
+	if (atomic_dec_and_test(&cq->refcount))
+		complete(&cq->free);
+}
+
+/* Initialize the per-device CQ lookup table */
+void xsc_init_cq_table(struct xsc_core_device *dev)
+{
+	struct xsc_cq_table *cq_table = &dev->dev_res->cq_table;
+
+	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+	spin_lock_init(&cq_table->lock);
+}
new file mode 100644
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef XSC_CQ_H
+#define XSC_CQ_H
+
+#include "common/xsc_core.h"
+
+/* Deliver async event @event_type to the CQ identified by @cqn */
+void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type);
+/* One-time initialization of dev->dev_res->cq_table */
+void xsc_init_cq_table(struct xsc_core_device *dev);
+
+#endif /* XSC_CQ_H */
@@ -6,6 +6,8 @@
#include "common/xsc_core.h"
#include "common/xsc_driver.h"
#include "hw.h"
+#include "qp.h"
+#include "cq.h"
unsigned int xsc_debug_mask;
module_param_named(debug_mask, xsc_debug_mask, uint, 0644);
@@ -242,6 +244,9 @@ static int xsc_hw_setup(struct xsc_core_device *dev)
goto err_cmdq_ver_chk;
}
+ xsc_init_cq_table(dev);
+ xsc_init_qp_table(dev);
+
return 0;
err_cmdq_ver_chk:
xsc_cmd_cleanup(dev);
new file mode 100644
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <linux/gfp.h>
+#include <linux/time.h>
+#include <linux/export.h>
+#include <linux/kthread.h>
+#include "common/xsc_core.h"
+#include "qp.h"
+
+/* Register @qp in the per-device QP table so the event path can find
+ * it.  Returns 0 on success or the radix-tree insertion error.
+ *
+ * The refcount, completion and pid MUST be initialized before the QP
+ * is published in the tree: as soon as radix_tree_insert() succeeds, a
+ * concurrent xsc_qp_event() can look the QP up and take a reference,
+ * so publishing first would let it see an uninitialized refcount.
+ */
+int xsc_core_create_resource_common(struct xsc_core_device *xdev,
+				    struct xsc_core_qp *qp)
+{
+	struct xsc_qp_table *table = &xdev->dev_res->qp_table;
+	int err;
+
+	atomic_set(&qp->refcount, 1);
+	init_completion(&qp->free);
+	qp->pid = current->pid;
+
+	spin_lock_irq(&table->lock);
+	err = radix_tree_insert(&table->tree, qp->qpn, qp);
+	spin_unlock_irq(&table->lock);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_core_create_resource_common);
+
+/* Unpublish @qp and wait for all outstanding references to drop.
+ * Order matters: the QP is first removed from the table so no new
+ * xsc_qp_event() lookup can find it, then the initial create-time
+ * reference is dropped, and finally we block until every in-flight
+ * event reference has been released.  After return the caller may
+ * free @qp.
+ */
+void xsc_core_destroy_resource_common(struct xsc_core_device *xdev,
+				      struct xsc_core_qp *qp)
+{
+	struct xsc_qp_table *table = &xdev->dev_res->qp_table;
+	unsigned long flags;
+
+	spin_lock_irqsave(&table->lock, flags);
+	radix_tree_delete(&table->tree, qp->qpn);
+	spin_unlock_irqrestore(&table->lock, flags);
+
+	if (atomic_dec_and_test(&qp->refcount))
+		complete(&qp->free);
+	wait_for_completion(&qp->free);
+}
+EXPORT_SYMBOL_GPL(xsc_core_destroy_resource_common);
+
+/* Dispatch an async event to the QP identified by @qpn.
+ * Mirrors xsc_cq_event(): a temporary reference taken under the table
+ * lock keeps the QP alive across the ->event callback; dropping the
+ * last reference completes ->free for xsc_core_destroy_resource_common().
+ * NOTE(review): assumes ->event is non-NULL for every registered QP —
+ * confirm this is guaranteed at QP creation.
+ */
+void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type)
+{
+	struct xsc_qp_table *table = &xdev->dev_res->qp_table;
+	struct xsc_core_qp *qp;
+
+	spin_lock(&table->lock);
+
+	qp = radix_tree_lookup(&table->tree, qpn);
+	if (qp)
+		atomic_inc(&qp->refcount);
+
+	spin_unlock(&table->lock);
+
+	if (!qp) {
+		xsc_core_warn(xdev, "Async event for bogus QP 0x%x\n", qpn);
+		return;
+	}
+
+	/* callback runs outside the lock; the reference keeps qp alive */
+	qp->event(qp, event_type);
+
+	if (atomic_dec_and_test(&qp->refcount))
+		complete(&qp->free);
+}
+
+/* Initialize the per-device QP lookup table */
+void xsc_init_qp_table(struct xsc_core_device *xdev)
+{
+	struct xsc_qp_table *qp_table = &xdev->dev_res->qp_table;
+
+	INIT_RADIX_TREE(&qp_table->tree, GFP_ATOMIC);
+	spin_lock_init(&qp_table->lock);
+}
new file mode 100644
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#ifndef XSC_QP_H
+#define XSC_QP_H
+
+#include "common/xsc_core.h"
+
+/* One-time initialization of xdev->dev_res->qp_table */
+void xsc_init_qp_table(struct xsc_core_device *xdev);
+/* NOTE(review): declared here but no definition exists in this patch —
+ * either add it to qp.c or drop the declaration.
+ */
+void xsc_cleanup_qp_table(struct xsc_core_device *xdev);
+/* Deliver async event @event_type to the QP identified by @qpn */
+void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type);
+
+#endif /* XSC_QP_H */