@@ -57,6 +57,7 @@ endfunction()
rdma_kernel_provider_abi(
rdma/ib_user_verbs.h
rdma/mlx4-abi.h
+ rdma/mlx5-abi.h
rdma/qedr-abi.h
rdma/rdma_user_rxe.h
)
@@ -36,6 +36,7 @@
#include <infiniband/kern-abi.h>
#include <infiniband/verbs.h>
#include <rdma/mlx5-abi.h>
+#include <kernel-abi/mlx5-abi.h>
#include "mlx5dv.h"
#define MLX5_UVERBS_MIN_ABI_VERSION 1
@@ -50,202 +51,38 @@ enum {
MLX5_MED_BFREGS_TSHOLD = 12,
};
-struct mlx5_alloc_ucontext {
- struct ibv_get_context ibv_req;
- __u32 total_num_uuars;
- __u32 num_low_latency_uuars;
- __u32 flags;
- __u32 comp_mask;
- __u8 cqe_version;
- __u8 reserved0;
- __u16 reserved1;
- __u32 reserved2;
- __u64 lib_caps;
-};
-
-struct mlx5_alloc_ucontext_resp {
- struct ib_uverbs_get_context_resp ibv_resp;
- __u32 qp_tab_size;
- __u32 bf_reg_size;
- __u32 tot_uuars;
- __u32 cache_line_size;
- __u16 max_sq_desc_sz;
- __u16 max_rq_desc_sz;
- __u32 max_send_wqebb;
- __u32 max_recv_wr;
- __u32 max_srq_recv_wr;
- __u16 num_ports;
- __u16 reserved1;
- __u32 comp_mask;
- __u32 response_length;
- __u8 cqe_version;
- __u8 cmds_supp_uhw;
- __u8 reserved2;
- __u8 clock_info_versions;
- __u64 hca_core_clock_offset;
- __u32 log_uar_size;
- __u32 num_uars_per_page;
- __u32 num_dyn_bfregs;
- __u32 reserved3;
-};
-
-struct mlx5_create_ah_resp {
- struct ib_uverbs_create_ah_resp ibv_resp;
- __u32 response_length;
- __u8 dmac[ETHERNET_LL_SIZE];
- __u8 reserved[6];
-};
-
-struct mlx5_alloc_pd_resp {
- struct ib_uverbs_alloc_pd_resp ibv_resp;
- __u32 pdn;
-};
-
-struct mlx5_create_cq {
- struct ibv_create_cq ibv_cmd;
- __u64 buf_addr;
- __u64 db_addr;
- __u32 cqe_size;
- __u8 cqe_comp_en;
- __u8 cqe_comp_res_format;
- __u16 flags; /* Use enum mlx5_create_cq_flags */
-};
-
-struct mlx5_create_cq_resp {
- struct ib_uverbs_create_cq_resp ibv_resp;
- __u32 cqn;
-};
-
-struct mlx5_create_srq {
- struct ibv_create_srq ibv_cmd;
- __u64 buf_addr;
- __u64 db_addr;
- __u32 flags;
-};
-
-struct mlx5_create_srq_resp {
- struct ib_uverbs_create_srq_resp ibv_resp;
- __u32 srqn;
- __u32 reserved;
-};
-
-struct mlx5_create_srq_ex {
- struct ibv_create_xsrq ibv_cmd;
- __u64 buf_addr;
- __u64 db_addr;
- __u32 flags;
- __u32 reserved;
- __u32 uidx;
- __u32 reserved1;
-};
-
-struct mlx5_create_qp_ex {
- struct ibv_create_qp_ex ibv_cmd;
- struct mlx5_ib_create_qp drv_ex;
-};
-
-struct mlx5_create_qp_ex_rss {
- struct ibv_create_qp_ex ibv_cmd;
- __u64 rx_hash_fields_mask; /* enum ibv_rx_hash_fields */
- __u8 rx_hash_function; /* enum ibv_rx_hash_function_flags */
- __u8 rx_key_len;
- __u8 reserved[6];
- __u8 rx_hash_key[128];
- __u32 comp_mask;
- __u32 create_flags;
-};
-
-struct mlx5_create_qp_resp_ex {
- struct ib_uverbs_ex_create_qp_resp ibv_resp;
- __u32 uuar_index;
- __u32 reserved;
-};
-
-struct mlx5_create_qp {
- struct ibv_create_qp ibv_cmd;
- __u64 buf_addr;
- __u64 db_addr;
- __u32 sq_wqe_count;
- __u32 rq_wqe_count;
- __u32 rq_wqe_shift;
- __u32 flags;
- __u32 uidx;
- __u32 bfreg_index;
- union {
- /* SQ buffer address - used for Raw Packet QP */
- __u64 sq_buf_addr;
- /* DC access key - used to create a DCT QP */
- __u64 access_key;
- };
-};
-
-struct mlx5_create_qp_resp {
- struct ib_uverbs_create_qp_resp ibv_resp;
- __u32 uuar_index;
-};
-
-struct mlx5_create_wq {
- struct ibv_create_wq ibv_cmd;
- struct mlx5_ib_create_wq drv;
-};
-
-struct mlx5_create_wq_resp {
- struct ib_uverbs_ex_create_wq_resp ibv_resp;
- __u32 response_length;
- __u32 reserved;
-};
-
-struct mlx5_modify_wq {
- struct ibv_modify_wq ibv_cmd;
- __u32 comp_mask;
- __u32 reserved;
-};
-
-struct mlx5_create_rwq_ind_table_resp {
- struct ib_uverbs_ex_create_rwq_ind_table_resp ibv_resp;
-};
-
-struct mlx5_destroy_rwq_ind_table {
- struct ibv_destroy_rwq_ind_table ibv_cmd;
-};
-
-struct mlx5_resize_cq {
- struct ibv_resize_cq ibv_cmd;
- __u64 buf_addr;
- __u16 cqe_size;
- __u16 reserved0;
- __u32 reserved1;
-};
-
-struct mlx5_resize_cq_resp {
- struct ib_uverbs_resize_cq_resp ibv_resp;
-};
-
-struct mlx5_query_device_ex {
- struct ibv_query_device_ex ibv_cmd;
-};
-
-struct mlx5_query_device_ex_resp {
- struct ib_uverbs_ex_query_device_resp ibv_resp;
- __u32 comp_mask;
- __u32 response_length;
- struct mlx5_ib_tso_caps tso_caps;
- struct mlx5_ib_rss_caps rss_caps; /* vendor data channel */
- struct mlx5_ib_cqe_comp_caps cqe_comp_caps;
- struct mlx5_packet_pacing_caps packet_pacing_caps;
- __u32 support_multi_pkt_send_wqe;
- __u32 flags; /* Use enum mlx5_query_dev_resp_flags */
- struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
- struct mlx5_ib_striding_rq_caps striding_rq_caps;
- __u32 tunnel_offloads_caps;
- __u32 reserved;
-};
-
-struct mlx5_modify_qp_resp_ex {
- struct ib_uverbs_ex_modify_qp_resp base;
- __u32 response_length;
- __u32 dctn;
-};
+DECLARE_DRV_CMD(mlx5_alloc_ucontext, IB_USER_VERBS_CMD_GET_CONTEXT,
+ mlx5_ib_alloc_ucontext_req_v2, mlx5_ib_alloc_ucontext_resp);
+DECLARE_DRV_CMD(mlx5_create_ah, IB_USER_VERBS_CMD_CREATE_AH,
+ empty, mlx5_ib_create_ah_resp);
+DECLARE_DRV_CMD(mlx5_alloc_pd, IB_USER_VERBS_CMD_ALLOC_PD,
+ empty, mlx5_ib_alloc_pd_resp);
+DECLARE_DRV_CMD(mlx5_create_cq, IB_USER_VERBS_CMD_CREATE_CQ,
+ mlx5_ib_create_cq, mlx5_ib_create_cq_resp);
+DECLARE_DRV_CMD(mlx5_create_srq, IB_USER_VERBS_CMD_CREATE_SRQ,
+ mlx5_ib_create_srq, mlx5_ib_create_srq_resp);
+DECLARE_DRV_CMD(mlx5_create_srq_ex, IB_USER_VERBS_CMD_CREATE_XSRQ,
+ mlx5_ib_create_srq, mlx5_ib_create_srq_resp);
+DECLARE_DRV_CMD(mlx5_create_qp_ex, IB_USER_VERBS_EX_CMD_CREATE_QP,
+ mlx5_ib_create_qp, mlx5_ib_create_qp_resp);
+DECLARE_DRV_CMD(mlx5_create_qp_ex_rss, IB_USER_VERBS_EX_CMD_CREATE_QP,
+ mlx5_ib_create_qp_rss, mlx5_ib_create_qp_resp);
+DECLARE_DRV_CMD(mlx5_create_qp, IB_USER_VERBS_CMD_CREATE_QP,
+ mlx5_ib_create_qp, mlx5_ib_create_qp_resp);
+DECLARE_DRV_CMD(mlx5_create_wq, IB_USER_VERBS_EX_CMD_CREATE_WQ,
+ mlx5_ib_create_wq, mlx5_ib_create_wq_resp);
+DECLARE_DRV_CMD(mlx5_modify_wq, IB_USER_VERBS_EX_CMD_MODIFY_WQ,
+ mlx5_ib_modify_wq, empty);
+DECLARE_DRV_CMD(mlx5_create_rwq_ind_table, IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL,
+ empty, empty);
+DECLARE_DRV_CMD(mlx5_destroy_rwq_ind_table, IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL,
+ empty, empty);
+DECLARE_DRV_CMD(mlx5_resize_cq, IB_USER_VERBS_CMD_RESIZE_CQ,
+ mlx5_ib_resize_cq, empty);
+DECLARE_DRV_CMD(mlx5_query_device_ex, IB_USER_VERBS_EX_CMD_QUERY_DEVICE,
+ empty, mlx5_ib_query_device_resp);
+DECLARE_DRV_CMD(mlx5_modify_qp_ex, IB_USER_VERBS_EX_CMD_MODIFY_QP,
+ empty, mlx5_ib_modify_qp_resp);
struct mlx5_modify_qp {
struct ibv_modify_qp_ex ibv_cmd;
@@ -577,7 +577,7 @@ static int mlx5_cmd_get_context(struct mlx5_context *context,
{
struct verbs_context *verbs_ctx = &context->ibv_ctx;
- if (!ibv_cmd_get_context(verbs_ctx, &req->ibv_req,
+ if (!ibv_cmd_get_context(verbs_ctx, &req->ibv_cmd,
req_len, &resp->ibv_resp, resp_len))
return 0;
@@ -600,14 +600,14 @@ static int mlx5_cmd_get_context(struct mlx5_context *context,
* to do so. If zero is a valid response, we will add a new
* field that indicates whether the request was handled.
*/
- if (!ibv_cmd_get_context(verbs_ctx, &req->ibv_req,
+ if (!ibv_cmd_get_context(verbs_ctx, &req->ibv_cmd,
offsetof(struct mlx5_alloc_ucontext, lib_caps),
&resp->ibv_resp, resp_len))
return 0;
- return ibv_cmd_get_context(verbs_ctx, &req->ibv_req,
+ return ibv_cmd_get_context(verbs_ctx, &req->ibv_cmd,
offsetof(struct mlx5_alloc_ucontext,
- cqe_version),
+ max_cqe_version),
&resp->ibv_resp, resp_len);
}
@@ -1010,9 +1010,9 @@ static struct verbs_context *mlx5_alloc_context(struct ibv_device *ibdev,
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
- req.total_num_uuars = tot_uuars;
- req.num_low_latency_uuars = low_lat_uuars;
- req.cqe_version = MLX5_CQE_VERSION_V1;
+ req.total_num_bfregs = tot_uuars;
+ req.num_low_latency_bfregs = low_lat_uuars;
+ req.max_cqe_version = MLX5_CQE_VERSION_V1;
req.lib_caps |= MLX5_LIB_CAP_4K_UAR;
if (mlx5_cmd_get_context(context, &req, sizeof(req), &resp,
@@ -1021,7 +1021,7 @@ static struct verbs_context *mlx5_alloc_context(struct ibv_device *ibdev,
context->max_num_qps = resp.qp_tab_size;
context->bf_reg_size = resp.bf_reg_size;
- context->tot_uuars = resp.tot_uuars;
+ context->tot_uuars = resp.tot_bfregs;
context->low_lat_uuars = low_lat_uuars;
context->cache_line_size = resp.cache_line_size;
context->max_sq_desc_sz = resp.max_sq_desc_sz;
@@ -1429,7 +1429,7 @@ static int mlx5_cmd_create_rss_qp(struct ibv_context *context,
uint32_t mlx5_create_flags)
{
struct mlx5_create_qp_ex_rss cmd_ex_rss = {};
- struct mlx5_create_qp_resp_ex resp = {};
+ struct mlx5_create_qp_ex_resp resp = {};
int ret;
if (attr->rx_hash_conf.rx_hash_key_len > sizeof(cmd_ex_rss.rx_hash_key)) {
@@ -1440,7 +1440,7 @@ static int mlx5_cmd_create_rss_qp(struct ibv_context *context,
cmd_ex_rss.rx_hash_fields_mask = attr->rx_hash_conf.rx_hash_fields_mask;
cmd_ex_rss.rx_hash_function = attr->rx_hash_conf.rx_hash_function;
cmd_ex_rss.rx_key_len = attr->rx_hash_conf.rx_hash_key_len;
- cmd_ex_rss.create_flags = mlx5_create_flags;
+ cmd_ex_rss.flags = mlx5_create_flags;
memcpy(cmd_ex_rss.rx_hash_key, attr->rx_hash_conf.rx_hash_key,
attr->rx_hash_conf.rx_hash_key_len);
@@ -1460,7 +1460,7 @@ static int mlx5_cmd_create_qp_ex(struct ibv_context *context,
struct ibv_qp_init_attr_ex *attr,
struct mlx5_create_qp *cmd,
struct mlx5_qp *qp,
- struct mlx5_create_qp_resp_ex *resp)
+ struct mlx5_create_qp_ex_resp *resp)
{
struct mlx5_create_qp_ex cmd_ex;
int ret;
@@ -1468,9 +1468,7 @@ static int mlx5_cmd_create_qp_ex(struct ibv_context *context,
memset(&cmd_ex, 0, sizeof(cmd_ex));
	*ibv_create_qp_ex_to_reg(&cmd_ex.ibv_cmd) = cmd->ibv_cmd.core_payload;
- memcpy(&cmd_ex.drv_ex, &cmd->buf_addr,
- offsetof(typeof(*cmd), sq_buf_addr) +
- sizeof(cmd->sq_buf_addr) - sizeof(cmd->ibv_cmd));
+ cmd_ex.drv_payload = cmd->drv_payload;
ret = ibv_cmd_create_qp_ex2(context, &qp->verbs_qp,
sizeof(qp->verbs_qp), attr,
@@ -1564,7 +1562,7 @@ static struct ibv_qp *create_qp(struct ibv_context *context,
{
struct mlx5_create_qp cmd;
struct mlx5_create_qp_resp resp;
- struct mlx5_create_qp_resp_ex resp_ex;
+ struct mlx5_create_qp_ex_resp resp_ex;
struct mlx5_qp *qp;
int ret;
struct mlx5_context *ctx = to_mctx(context);
@@ -1770,7 +1768,7 @@ static struct ibv_qp *create_qp(struct ibv_context *context,
}
uuar_index = (attr->comp_mask & MLX5_CREATE_QP_EX2_COMP_MASK) ?
- resp_ex.uuar_index : resp.uuar_index;
+ resp_ex.bfreg_index : resp.bfreg_index;
if (!ctx->cqe_version) {
if (qp->sq.wqe_cnt || qp->rq.wqe_cnt) {
ret = mlx5_store_qp(ctx, ibqp->qp_num, qp);
@@ -1979,7 +1977,7 @@ static int modify_dct(struct ibv_qp *qp, struct ibv_qp_attr *attr,
int attr_mask)
{
struct ibv_modify_qp_ex cmd_ex = {};
- struct mlx5_modify_qp_resp_ex resp = {};
+ struct mlx5_modify_qp_ex_resp resp = {};
struct mlx5_qp *mqp = to_mqp(qp);
struct mlx5_context *context = to_mctx(qp->context);
int min_resp_size;
@@ -1989,8 +1987,8 @@ static int modify_dct(struct ibv_qp *qp, struct ibv_qp_attr *attr,
ret = ibv_cmd_modify_qp_ex(qp, attr, attr_mask,
&cmd_ex,
sizeof(cmd_ex), sizeof(cmd_ex),
- &resp.base,
- sizeof(resp.base), sizeof(resp));
+ &resp.ibv_resp,
+ sizeof(resp.ibv_resp), sizeof(resp));
if (ret)
return ret;
@@ -2008,7 +2006,7 @@ static int modify_dct(struct ibv_qp *qp, struct ibv_qp_attr *attr,
min_resp_size =
offsetof(typeof(resp), dctn) +
sizeof(resp.dctn) -
- sizeof(resp.base);
+ sizeof(resp.ibv_resp);
if (resp.response_length < min_resp_size) {
errno = EINVAL;
@@ -2628,10 +2626,10 @@ int mlx5_query_device_ex(struct ibv_context *context,
attr->packet_pacing_caps.supported_qpts =
resp.packet_pacing_caps.supported_qpts;
- if (resp.support_multi_pkt_send_wqe & MLX5_IB_ALLOW_MPW)
+ if (resp.mlx5_ib_support_multi_pkt_send_wqes & MLX5_IB_ALLOW_MPW)
mctx->vendor_cap_flags |= MLX5_VENDOR_CAP_FLAGS_MPW_ALLOWED;
- if (resp.support_multi_pkt_send_wqe & MLX5_IB_SUPPORT_EMPW)
+ if (resp.mlx5_ib_support_multi_pkt_send_wqes & MLX5_IB_SUPPORT_EMPW)
mctx->vendor_cap_flags |= MLX5_VENDOR_CAP_FLAGS_ENHANCED_MPW;
mctx->cqe_comp_caps.max_num = resp.cqe_comp_caps.max_num;
@@ -2745,7 +2743,7 @@ static struct ibv_wq *create_wq(struct ibv_context *context,
rwq->wq_sig = rwq_sig_enabled(context);
if (rwq->wq_sig)
- cmd.drv.flags = MLX5_WQ_FLAG_SIGNATURE;
+ cmd.flags = MLX5_WQ_FLAG_SIGNATURE;
ret = mlx5_calc_rwq_size(ctx, rwq, attr, mlx5wq_attr);
if (ret < 0) {
@@ -2770,17 +2768,17 @@ static struct ibv_wq *create_wq(struct ibv_context *context,
rwq->db[MLX5_SND_DBR] = 0;
rwq->pbuff = rwq->buf.buf + rwq->rq.offset;
rwq->recv_db = &rwq->db[MLX5_RCV_DBR];
- cmd.drv.buf_addr = (uintptr_t)rwq->buf.buf;
- cmd.drv.db_addr = (uintptr_t)rwq->db;
- cmd.drv.rq_wqe_count = rwq->rq.wqe_cnt;
- cmd.drv.rq_wqe_shift = rwq->rq.wqe_shift;
+ cmd.buf_addr = (uintptr_t)rwq->buf.buf;
+ cmd.db_addr = (uintptr_t)rwq->db;
+ cmd.rq_wqe_count = rwq->rq.wqe_cnt;
+ cmd.rq_wqe_shift = rwq->rq.wqe_shift;
usr_idx = mlx5_store_uidx(ctx, rwq);
if (usr_idx < 0) {
mlx5_dbg(fp, MLX5_DBG_QP, "Couldn't find free user index\n");
goto err_free_db_rec;
}
- cmd.drv.user_index = usr_idx;
+ cmd.user_index = usr_idx;
if (mlx5wq_attr) {
if (mlx5wq_attr->comp_mask & MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ) {
@@ -2800,13 +2798,13 @@ static struct ibv_wq *create_wq(struct ibv_context *context,
goto err_create;
}
- cmd.drv.single_stride_log_num_of_bytes =
+ cmd.single_stride_log_num_of_bytes =
mlx5wq_attr->striding_rq_attrs.single_stride_log_num_of_bytes;
- cmd.drv.single_wqe_log_num_of_strides =
+ cmd.single_wqe_log_num_of_strides =
mlx5wq_attr->striding_rq_attrs.single_wqe_log_num_of_strides;
- cmd.drv.two_byte_shift_en =
+ cmd.two_byte_shift_en =
mlx5wq_attr->striding_rq_attrs.two_byte_shift_en;
- cmd.drv.comp_mask |= MLX5_IB_CREATE_WQ_STRIDING_RQ;
+ cmd.comp_mask |= MLX5_IB_CREATE_WQ_STRIDING_RQ;
}
}
@@ -2819,13 +2817,13 @@ static struct ibv_wq *create_wq(struct ibv_context *context,
goto err_create;
rwq->rsc.type = MLX5_RSC_TYPE_RWQ;
- rwq->rsc.rsn = cmd.drv.user_index;
+ rwq->rsc.rsn = cmd.user_index;
rwq->wq.post_recv = mlx5_post_wq_recv;
return &rwq->wq;
err_create:
- mlx5_clear_uidx(ctx, cmd.drv.user_index);
+ mlx5_clear_uidx(ctx, cmd.user_index);
err_free_db_rec:
mlx5_free_db(to_mctx(context), rwq->db);
err_free_rwq_buf: