[rdma-core,05/20] mlx5: Use kernel uapi headers

Message ID 20180320211850.10090-6-jgg@ziepe.ca (mailing list archive)
State Not Applicable

Commit Message

Jason Gunthorpe March 20, 2018, 9:18 p.m. UTC
From: Jason Gunthorpe <jgg@mellanox.com>

Several of the enums and structs duplicate the kernel header versions;
just use the kernel ones natively.
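
For instance, the provider now picks these definitions up from the kernel
uapi header; a minimal sketch of what the code relies on after this change
(matching the mlx5-abi.h hunk below):

	#include <rdma/mlx5-abi.h>	/* MLX5_QP_FLAG_*, MLX5_IB_ALLOW_MPW,
					 * struct mlx5_ib_clock_info, ... */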

ibv_packet_pacing_caps is not part of the kernel ABI and should not be
used like this; open-code the copy instead.
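
The open-coded copy amounts to a field-by-field assignment, roughly (a
sketch of the verbs.c hunk below):

	attr->packet_pacing_caps.qp_rate_limit_min =
		resp.packet_pacing_caps.qp_rate_limit_min;
	attr->packet_pacing_caps.qp_rate_limit_max =
		resp.packet_pacing_caps.qp_rate_limit_max;
	attr->packet_pacing_caps.supported_qpts =
		resp.packet_pacing_caps.supported_qpts;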

This is enough to include mlx5-abi.h in the build without compile
errors.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
---
 providers/mlx5/mlx5-abi.h | 84 ++---------------------------------------------
 providers/mlx5/mlx5.c     |  4 +--
 providers/mlx5/mlx5.h     |  6 ----
 providers/mlx5/verbs.c    | 23 ++++++++-----
 4 files changed, 18 insertions(+), 99 deletions(-)

Patch

diff --git a/providers/mlx5/mlx5-abi.h b/providers/mlx5/mlx5-abi.h
index dded202642247a..013a8ef0ed1c8d 100644
--- a/providers/mlx5/mlx5-abi.h
+++ b/providers/mlx5/mlx5-abi.h
@@ -35,24 +35,12 @@ 
 
 #include <infiniband/kern-abi.h>
 #include <infiniband/verbs.h>
+#include <rdma/mlx5-abi.h>
 #include "mlx5dv.h"
 
 #define MLX5_UVERBS_MIN_ABI_VERSION	1
 #define MLX5_UVERBS_MAX_ABI_VERSION	1
 
-enum {
-	MLX5_QP_FLAG_SIGNATURE		= 1 << 0,
-	MLX5_QP_FLAG_SCATTER_CQE	= 1 << 1,
-	MLX5_QP_FLAG_TUNNEL_OFFLOADS	= 1 << 2,
-	MLX5_QP_FLAG_BFREG_INDEX	= 1 << 3,
-	MLX5_QP_FLAG_TYPE_DCT		= 1 << 4,
-	MLX5_QP_FLAG_TYPE_DCI		= 1 << 5,
-};
-
-enum {
-	MLX5_RWQ_FLAG_SIGNATURE		= 1 << 0,
-};
-
 enum {
 	MLX5_NUM_NON_FP_BFREGS_PER_UAR	= 2,
 	NUM_BFREGS_PER_UAR		= 4,
@@ -62,10 +50,6 @@  enum {
 	MLX5_MED_BFREGS_TSHOLD		= 12,
 };
 
-enum mlx5_lib_caps {
-	MLX5_LIB_CAP_4K_UAR		= 1 << 0,
-};
-
 struct mlx5_alloc_ucontext {
 	struct ibv_get_context		ibv_req;
 	__u32				total_num_uuars;
@@ -79,31 +63,6 @@  struct mlx5_alloc_ucontext {
 	__u64				lib_caps;
 };
 
-enum mlx5_ib_alloc_ucontext_resp_mask {
-	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
-};
-
-/* Bit indexes for the mlx5_alloc_ucontext_resp.clock_info_versions bitmap */
-enum {
-	MLX5_IB_CLOCK_INFO_V1	= 0,
-};
-
-enum {
-	MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
-};
-
-struct mlx5_ib_clock_info {
-	__u32 sig;
-	__u32 resv;
-	__u64 nsec;
-	__u64 last_cycles;
-	__u64 frac;
-	__u32 mult;
-	__u32 shift;
-	__u64 mask;
-	__u64 overflow_period;
-};
-
 struct mlx5_alloc_ucontext_resp {
 	struct ib_uverbs_get_context_resp	ibv_resp;
 	__u32				qp_tab_size;
@@ -142,10 +101,6 @@  struct mlx5_alloc_pd_resp {
 	__u32				pdn;
 };
 
-enum mlx5_create_cq_flags {
-	MLX5_CREATE_CQ_FLAGS_CQE_128B_PAD	= 1 << 0,
-};
-
 struct mlx5_create_cq {
 	struct ibv_create_cq		ibv_cmd;
 	__u64				buf_addr;
@@ -246,10 +201,6 @@  struct mlx5_create_qp_resp {
 	__u32				uuar_index;
 };
 
-enum mlx5_create_wq_comp_mask {
-	MLX5_IB_CREATE_WQ_STRIDING_RQ =		1 << 0,
-};
-
 struct mlx5_drv_create_wq {
 	__u64		buf_addr;
 	__u64		db_addr;
@@ -304,37 +255,12 @@  struct mlx5_query_device_ex {
 	struct ibv_query_device_ex	ibv_cmd;
 };
 
-struct mlx5_reserved_tso_caps {
-	__u64 reserved;
-};
-
 struct mlx5_rss_caps {
 	__u64 rx_hash_fields_mask; /* enum ibv_rx_hash_fields */
 	__u8 rx_hash_function; /* enum ibv_rx_hash_function_flags */
 	__u8 reserved[7];
 };
 
-enum mlx5_ib_packet_pacing_cap_flags {
-	MLX5_IB_PP_SUPPORT_BURST	= 1 << 0,
-};
-
-struct mlx5_packet_pacing_caps {
-	struct ibv_packet_pacing_caps caps;
-	__u8   cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */
-	__u8   reserved[3];
-};
-
-enum mlx5_mpw_caps {
-	MLX5_MPW_OBSOLETE	= 1 << 0, /* Obsoleted, don't use */
-	MLX5_ALLOW_MPW		= 1 << 1,
-	MLX5_SUPPORT_EMPW	= 1 << 2,
-};
-
-enum mlx5_query_dev_resp_flags {
-	MLX5_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP	= 1 << 0,
-	MLX5_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD	= 1 << 1,
-};
-
 struct mlx5_striding_rq_caps {
 	struct mlx5dv_striding_rq_caps	caps;
 	__u32				reserved;
@@ -362,13 +288,7 @@  struct mlx5_modify_qp_resp_ex {
 	__u32  dctn;
 };
 
-struct mlx5_ib_burst_info {
-	__u32	max_burst_sz;
-	__u16	typical_pkt_sz;
-	__u16	reserved;
-};
-
-struct mlx5_ib_modify_qp {
+struct mlx5_modify_qp {
 	struct ibv_modify_qp_ex		ibv_cmd;
 	__u32				comp_mask;
 	struct mlx5_ib_burst_info	burst_info;
diff --git a/providers/mlx5/mlx5.c b/providers/mlx5/mlx5.c
index 39a3971c01d192..3631b427a2d5ef 100644
--- a/providers/mlx5/mlx5.c
+++ b/providers/mlx5/mlx5.c
@@ -917,7 +917,7 @@  int mlx5dv_get_clock_info(struct ibv_context *ctx_in,
 	if (!ci)
 		return EINVAL;
 
-	sig = (atomic_uint32_t *)&ci->sig;
+	sig = (atomic_uint32_t *)&ci->sign;
 
 	do {
 		retry = 10;
@@ -930,7 +930,7 @@  repeat:
 			return EBUSY;
 		}
 		clock_info->nsec   = ci->nsec;
-		clock_info->last_cycles = ci->last_cycles;
+		clock_info->last_cycles = ci->cycles;
 		clock_info->frac   = ci->frac;
 		clock_info->mult   = ci->mult;
 		clock_info->shift  = ci->shift;
diff --git a/providers/mlx5/mlx5.h b/providers/mlx5/mlx5.h
index 9f640f73f4c6b9..b0f5f72c85af0a 100644
--- a/providers/mlx5/mlx5.h
+++ b/providers/mlx5/mlx5.h
@@ -149,7 +149,6 @@  enum {
 };
 
 enum {
-	MLX5_SRQ_FLAG_SIGNATURE		= (1 << 0),
 	MLX5_SRQ_FLAG_TM_SW_CNT		= (1 << 6),
 	MLX5_SRQ_FLAG_TM_CQE_REQ	= (1 << 7),
 };
@@ -186,11 +185,6 @@  enum mlx5_rsc_type {
 	MLX5_RSC_TYPE_INVAL,
 };
 
-enum {
-	MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
-	MLX5_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
-};
-
 enum mlx5_vendor_cap_flags {
 	MLX5_VENDOR_CAP_FLAGS_MPW		= 1 << 0, /* Obsoleted */
 	MLX5_VENDOR_CAP_FLAGS_MPW_ALLOWED	= 1 << 1,
diff --git a/providers/mlx5/verbs.c b/providers/mlx5/verbs.c
index 9ef4edd7792f62..cbdb76978d56ea 100644
--- a/providers/mlx5/verbs.c
+++ b/providers/mlx5/verbs.c
@@ -685,7 +685,7 @@  static struct ibv_cq_ex *create_cq(struct ibv_context *context,
 					goto err_db;
 				}
 
-				cmd.flags |= MLX5_CREATE_CQ_FLAGS_CQE_128B_PAD;
+				cmd.flags |= MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD;
 			}
 		}
 	}
@@ -2133,7 +2133,7 @@  int mlx5_modify_qp_rate_limit(struct ibv_qp *qp,
 {
 	struct ibv_qp_attr qp_attr = {};
 	struct ib_uverbs_ex_modify_qp_resp resp = {};
-	struct mlx5_ib_modify_qp cmd = {};
+	struct mlx5_modify_qp cmd = {};
 	struct mlx5_context *mctx = to_mctx(qp->context);
 	int ret;
 
@@ -2620,12 +2620,17 @@  int mlx5_query_device_ex(struct ibv_context *context,
 	attr->tso_caps = resp.tso_caps;
 	attr->rss_caps.rx_hash_fields_mask = resp.rss_caps.rx_hash_fields_mask;
 	attr->rss_caps.rx_hash_function = resp.rss_caps.rx_hash_function;
-	attr->packet_pacing_caps = resp.packet_pacing_caps.caps;
-
-	if (resp.support_multi_pkt_send_wqe & MLX5_ALLOW_MPW)
+	attr->packet_pacing_caps.qp_rate_limit_min =
+		resp.packet_pacing_caps.qp_rate_limit_min;
+	attr->packet_pacing_caps.qp_rate_limit_max =
+		resp.packet_pacing_caps.qp_rate_limit_max;
+	attr->packet_pacing_caps.supported_qpts =
+		resp.packet_pacing_caps.supported_qpts;
+
+	if (resp.support_multi_pkt_send_wqe & MLX5_IB_ALLOW_MPW)
 		mctx->vendor_cap_flags |= MLX5_VENDOR_CAP_FLAGS_MPW_ALLOWED;
 
-	if (resp.support_multi_pkt_send_wqe & MLX5_SUPPORT_EMPW)
+	if (resp.support_multi_pkt_send_wqe & MLX5_IB_SUPPORT_EMPW)
 		mctx->vendor_cap_flags |= MLX5_VENDOR_CAP_FLAGS_ENHANCED_MPW;
 
 	mctx->cqe_comp_caps = resp.cqe_comp_caps;
@@ -2634,10 +2639,10 @@  int mlx5_query_device_ex(struct ibv_context *context,
 	mctx->tunnel_offloads_caps = resp.tunnel_offloads_caps;
 	mctx->packet_pacing_caps = resp.packet_pacing_caps;
 
-	if (resp.flags & MLX5_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP)
+	if (resp.flags & MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP)
 		mctx->vendor_cap_flags |= MLX5_VENDOR_CAP_FLAGS_CQE_128B_COMP;
 
-	if (resp.flags & MLX5_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD)
+	if (resp.flags & MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD)
 		mctx->vendor_cap_flags |= MLX5_VENDOR_CAP_FLAGS_CQE_128B_PAD;
 
 	major     = (raw_fw_ver >> 32) & 0xffff;
@@ -2726,7 +2731,7 @@  static struct ibv_wq *create_wq(struct ibv_context *context,
 
 	rwq->wq_sig = rwq_sig_enabled(context);
 	if (rwq->wq_sig)
-		cmd.drv.flags = MLX5_RWQ_FLAG_SIGNATURE;
+		cmd.drv.flags = MLX5_WQ_FLAG_SIGNATURE;
 
 	ret = mlx5_calc_rwq_size(ctx, rwq, attr, mlx5wq_attr);
 	if (ret < 0) {