@@ -60,9 +60,11 @@ void mlx5hws_bwc_rule_fill_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
static inline u16 mlx5hws_bwc_queues(struct mlx5hws_context *ctx)
{
/* Besides the control queue, half of the queues are
- * reguler HWS queues, and the other half are BWC queues.
+ * regular HWS queues, and the other half are BWC queues.
*/
- return (ctx->queues - 1) / 2;
+ if (mlx5hws_context_bwc_supported(ctx))
+ return (ctx->queues - 1) / 2;
+ return 0;
}
static inline u16 mlx5hws_bwc_get_queue_id(struct mlx5hws_context *ctx, u16 idx)
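For illustration, the queue-index layout this split implies, as a minimal userspace sketch. It assumes mlx5hws_bwc_get_queue_id() maps BWC index idx to idx + mlx5hws_bwc_queues(ctx), which is what the loop bounds in the last hunk suggest; the concrete queue count is an assumed example value, not taken from the driver:

  #include <stdio.h>

  int main(void)
  {
          unsigned int queues = 17;            /* assumed: 16 data queues + 1 control */
          unsigned int bwc = (queues - 1) / 2; /* mirrors mlx5hws_bwc_queues(): 8 */

          printf("native HWS queues: 0..%u\n", bwc - 1);          /* 0..7  */
          printf("BWC queues:        %u..%u\n", bwc, 2 * bwc - 1); /* 8..15 */
          printf("control queue:     %u\n", queues - 1);          /* 16    */
          return 0;
  }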
@@ -161,8 +161,10 @@ static int hws_context_init_hws(struct mlx5hws_context *ctx,
if (ret)
goto uninit_pd;
- if (attr->bwc)
- ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
+ /* Context supports the backward-compatible API
+ * and does not support the native HWS API.
+ */
+ ctx->flags |= MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
ret = mlx5hws_send_queues_open(ctx, attr->queues, attr->queue_size);
if (ret)
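Setting the flag unconditionally means every HWS context now reports BWC support; call sites keep gating on the existing helper. A hedged sketch of such a gate follows; the wrapper function is hypothetical, only mlx5hws_context_bwc_supported() comes from the driver:

  /* Hypothetical caller: reject BWC operations on contexts without
   * BWC support (after this patch, always set for HWS contexts).
   */
  static int example_bwc_op(struct mlx5hws_context *ctx)
  {
          if (!mlx5hws_context_bwc_supported(ctx))
                  return -EOPNOTSUPP;

          /* ... proceed with the BWC operation ... */
          return 0;
  }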
@@ -8,6 +8,7 @@ enum mlx5hws_context_flags {
MLX5HWS_CONTEXT_FLAG_HWS_SUPPORT = 1 << 0,
MLX5HWS_CONTEXT_FLAG_PRIVATE_PD = 1 << 1,
MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT = 1 << 2,
+ MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT = 1 << 3,
};
enum mlx5hws_context_shared_stc_type {
@@ -58,6 +59,11 @@ static inline bool mlx5hws_context_bwc_supported(struct mlx5hws_context *ctx)
return ctx->flags & MLX5HWS_CONTEXT_FLAG_BWC_SUPPORT;
}
+static inline bool mlx5hws_context_native_supported(struct mlx5hws_context *ctx)
+{
+ return ctx->flags & MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT;
+}
+
bool mlx5hws_context_cap_dynamic_reparse(struct mlx5hws_context *ctx);
u8 mlx5hws_context_get_reparse_mode(struct mlx5hws_context *ctx);
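Note that no hunk in this patch sets MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT, so mlx5hws_context_native_supported() currently evaluates to false and the native queues stay unopened. A one-line sketch of how a future init path might flip that (hypothetical, not part of this patch):

  /* Hypothetical future init path: advertising native HWS API support
   * would re-enable opening of the native queues in
   * hws_send_queues_open() below.
   */
  ctx->flags |= MLX5HWS_CONTEXT_FLAG_NATIVE_SUPPORT;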
@@ -70,7 +70,6 @@ enum mlx5hws_send_queue_actions {
struct mlx5hws_context_attr {
u16 queues;
u16 queue_size;
- bool bwc; /* add support for backward compatible API*/
};
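With the bwc field removed, caller-side initialization shrinks to the two remaining fields; a sketch with assumed example values:

  struct mlx5hws_context_attr attr = {
          .queues     = 16,  /* assumed example value */
          .queue_size = 256, /* assumed example value */
          /* no .bwc: BWC support is now implied for HWS contexts */
  };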
struct mlx5hws_table_attr {
@@ -898,6 +898,9 @@ static int mlx5hws_send_ring_open(struct mlx5hws_context *ctx,
static void hws_send_queue_close(struct mlx5hws_send_engine *queue)
{
+ if (!queue->num_entries)
+ return; /* this queue wasn't initialized */
+
hws_send_ring_close(queue);
kfree(queue->completed.entries);
}
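The early return treats num_entries == 0 as "never opened", which is only safe if unopened queues are zero-initialized. That holds assuming the send-queue array comes from a zeroing allocator, as in this sketch (the kcalloc() call here is an assumption about the allocation site, not a quote from the driver):

  /* Assumption: zero-allocating the array guarantees that any queue
   * hws_send_queue_open() never touched has num_entries == 0, making
   * hws_send_queue_close() a no-op for it.
   */
  ctx->send_queue = kcalloc(ctx->queues, sizeof(*ctx->send_queue),
                            GFP_KERNEL);
  if (!ctx->send_queue)
          return -ENOMEM;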
@@ -1000,12 +1003,33 @@ static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
return -ENOMEM;
}
+static int hws_send_queues_open(struct mlx5hws_context *ctx, u16 queue_size)
+{
+ int err = 0;
+ u32 i = 0;
+
+ /* If native API isn't supported, skip the unused native queues:
+ * initialize BWC queues and control queue only.
+ */
+ if (!mlx5hws_context_native_supported(ctx))
+ i = mlx5hws_bwc_get_queue_id(ctx, 0);
+
+ for (; i < ctx->queues; i++) {
+ err = hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
+ if (err) {
+ __hws_send_queues_close(ctx, i);
+ return err;
+ }
+ }
+
+ return 0;
+}
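A worked example of the loop bounds with assumed numbers: for 16 requested queues, ctx->queues is 17 and the BWC half is 8 queues wide, so with native support off the loop opens only indices 8..16 (eight BWC queues plus the control queue) and leaves 0..7 untouched. The same arithmetic as a standalone sketch:

  #include <stdio.h>

  int main(void)
  {
          unsigned int queues = 16 + 1;        /* requested + control (assumed) */
          unsigned int bwc = (queues - 1) / 2; /* 8 BWC queues */
          int native_supported = 0;            /* native API not supported */
          unsigned int i = native_supported ? 0 : bwc;

          if (i)
                  printf("skipping native queues 0..%u\n", i - 1);
          printf("opening queues %u..%u\n", i, queues - 1);
          return 0;
  }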
+
int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
u16 queues,
u16 queue_size)
{
int err = 0;
- u32 i;
/* Open one extra queue for control path */
ctx->queues = queues + 1;
@@ -1021,17 +1045,13 @@ int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
goto free_bwc_locks;
}
- for (i = 0; i < ctx->queues; i++) {
- err = hws_send_queue_open(ctx, &ctx->send_queue[i], queue_size);
- if (err)
- goto close_send_queues;
- }
+ err = hws_send_queues_open(ctx, queue_size);
+ if (err)
+ goto free_queues;
return 0;
-close_send_queues:
- __hws_send_queues_close(ctx, i);
-
+free_queues:
kfree(ctx->send_queue);
free_bwc_locks:
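The error handling is now split: the static helper unwinds its own partial progress, while the caller only releases what it allocated itself. A comment-style sketch of the resulting unwind order:

  /*
   * hws_send_queues_open() fails at queue i
   *   -> __hws_send_queues_close(ctx, i): closes queues 0..i-1;
   *      slots skipped at open time are no-ops thanks to the
   *      num_entries check in hws_send_queue_close()
   * caller mlx5hws_send_queues_open()
   *   -> free_queues:    kfree(ctx->send_queue)
   *   -> free_bwc_locks: release the BWC locks
   */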