@@ -32,7 +32,7 @@ uint32_t vendor_part_id; /* Vendor supplied part ID */
uint32_t hw_ver; /* Hardware version */
int max_qp; /* Maximum number of supported QPs */
int max_qp_wr; /* Maximum number of outstanding WR on any work queue */
-int device_cap_flags; /* HCA capabilities mask */
+unsigned int device_cap_flags; /* HCA capabilities mask */
int max_sge; /* Maximum number of s/g per WR for SQ & RQ of QP for non RDMA Read operations */
int max_sge_rd; /* Maximum number of s/g per WR for RDMA Read operations */
int max_cq; /* Maximum number of supported CQs */
@@ -161,7 +161,7 @@ struct ibv_device_attr {
uint32_t hw_ver;
int max_qp;
int max_qp_wr;
- int device_cap_flags;
+ unsigned int device_cap_flags;
int max_sge;
int max_sge_rd;
int max_cq;
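
Both hunks above switch device_cap_flags to unsigned int: the field is a bitmask built from enum ibv_device_cap_flags bits, so callers test it with bitwise AND on an unsigned quantity. A minimal caller-side sketch, not part of the patch (the helper name and the IBV_DEVICE_MEM_WINDOW bit are illustrative choices):

#include <infiniband/verbs.h>

/* Illustrative helper: query the device and test one capability bit.
 * device_cap_flags is a mask of enum ibv_device_cap_flags values,
 * which is why an unsigned type is the natural fit. */
static int device_has_mem_window(struct ibv_context *ctx)
{
	struct ibv_device_attr attr;

	if (ibv_query_device(ctx, &attr))
		return 0; /* query failed; report no support */
	return !!(attr.device_cap_flags & IBV_DEVICE_MEM_WINDOW);
}
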
@@ -527,7 +527,7 @@ struct ibv_wc {
};
uint32_t qp_num;
uint32_t src_qp;
- int wc_flags;
+ unsigned int wc_flags;
uint16_t pkey_index;
uint16_t slid;
uint8_t sl;
@@ -548,7 +548,7 @@ struct ibv_mw_bind_info {
struct ibv_mr *mr;
uint64_t addr;
uint64_t length;
- int mw_access_flags; /* use ibv_access_flags */
+ unsigned int mw_access_flags; /* use ibv_access_flags */
};

struct ibv_pd {
@@ -954,7 +954,7 @@ struct ibv_qp_attr {
uint32_t rq_psn;
uint32_t sq_psn;
uint32_t dest_qp_num;
- int qp_access_flags;
+ unsigned int qp_access_flags;
struct ibv_qp_cap cap;
struct ibv_ah_attr ah_attr;
struct ibv_ah_attr alt_ah_attr;
@@ -1008,7 +1008,7 @@ struct ibv_send_wr {
struct ibv_sge *sg_list;
int num_sge;
enum ibv_wr_opcode opcode;
- int send_flags;
+ unsigned int send_flags;
/* When opcode is *_WITH_IMM: Immediate data in network byte order.
* When opcode is *_INV: Stores the rkey to invalidate
*/
@@ -1090,7 +1090,7 @@ struct ibv_ops_wr {

struct ibv_mw_bind {
uint64_t wr_id;
- int send_flags;
+ unsigned int send_flags;
struct ibv_mw_bind_info bind_info;
};

@@ -1204,7 +1204,7 @@ struct ibv_cq_ex {
__be32 (*read_imm_data)(struct ibv_cq_ex *current);
uint32_t (*read_qp_num)(struct ibv_cq_ex *current);
uint32_t (*read_src_qp)(struct ibv_cq_ex *current);
- int (*read_wc_flags)(struct ibv_cq_ex *current);
+ unsigned int (*read_wc_flags)(struct ibv_cq_ex *current);
uint32_t (*read_slid)(struct ibv_cq_ex *current);
uint8_t (*read_sl)(struct ibv_cq_ex *current);
uint8_t (*read_dlid_path_bits)(struct ibv_cq_ex *current);
@@ -1290,7 +1290,7 @@ static inline uint32_t ibv_wc_read_src_qp(struct ibv_cq_ex *cq)
return cq->read_src_qp(cq);
}

-static inline int ibv_wc_read_wc_flags(struct ibv_cq_ex *cq)
+static inline unsigned int ibv_wc_read_wc_flags(struct ibv_cq_ex *cq)
{
return cq->read_wc_flags(cq);
}
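
For the extended-CQ path, the accessor's return type changes to match. A sketch of caller usage, assuming the CQ was created with ibv_create_cq_ex() and IBV_WC_EX_WITH_IMM was requested (the helper name is illustrative):

#include <endian.h>
#include <stdio.h>
#include <infiniband/verbs.h>

/* Illustrative helper: iterate an extended CQ; the unsigned local
 * now matches ibv_wc_read_wc_flags()'s return type. */
static void drain_cq_ex(struct ibv_cq_ex *cq)
{
	struct ibv_poll_cq_attr attr = {0};
	unsigned int flags;

	if (ibv_start_poll(cq, &attr))
		return; /* CQ empty, or an error */
	do {
		if (cq->status == IBV_WC_SUCCESS) {
			flags = ibv_wc_read_wc_flags(cq);
			if (flags & IBV_WC_WITH_IMM)
				printf("imm_data 0x%x\n",
				       (unsigned)be32toh(ibv_wc_read_imm_data(cq)));
		}
	} while (!ibv_next_poll(cq));
	ibv_end_poll(cq);
}
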
@@ -501,7 +501,7 @@ static uint32_t mlx4_cq_read_wc_qp_num(struct ibv_cq_ex *ibcq)
return be32toh(cq->cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK;
}

-static int mlx4_cq_read_wc_flags(struct ibv_cq_ex *ibcq)
+static unsigned int mlx4_cq_read_wc_flags(struct ibv_cq_ex *ibcq)
{
struct mlx4_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
int is_send = cq->cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
@@ -1300,7 +1300,7 @@ static inline uint32_t mlx5_cq_read_wc_qp_num(struct ibv_cq_ex *ibcq)
return be32toh(cq->cqe64->sop_drop_qpn) & 0xffffff;
}

-static inline int mlx5_cq_read_wc_flags(struct ibv_cq_ex *ibcq)
+static inline unsigned int mlx5_cq_read_wc_flags(struct ibv_cq_ex *ibcq)
{
struct mlx5_cq *cq = to_mcq(ibv_cq_ex_to_cq(ibcq));
int wc_flags = 0;
@@ -278,7 +278,7 @@ struct mlx5_context {
struct list_head hugetlb_list;
int cqe_version;
uint8_t cached_link_layer[MLX5_MAX_PORTS_NUM];
- int cached_device_cap_flags;
+ unsigned int cached_device_cap_flags;
enum ibv_atomic_cap atomic_cap;
struct {
uint64_t offset;