@@ -786,6 +786,8 @@ struct gve_priv {
u16 header_buf_size; /* device configured, header-split supported if non-zero */
bool header_split_enabled; /* True if the header split is enabled by the user */
+
+ u32 max_flow_rules; /* device-advertised limit on flow steering rules */
};
enum gve_service_task_flags_bit {
@@ -44,6 +44,7 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
@@ -189,6 +190,23 @@ void gve_parse_device_option(struct gve_priv *priv,
if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
priv->default_min_ring_size = true;
break;
+ case GVE_DEV_OPT_ID_FLOW_STEERING:
+ if (option_length < sizeof(**dev_op_flow_steering) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Flow Steering",
+ (int)sizeof(**dev_op_flow_steering),
+ GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_flow_steering))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Flow Steering");
+ *dev_op_flow_steering = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
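For orientation, every case in this switch consumes one header-plus-body TLV from the device descriptor. A rough sketch of that layout follows; the authoritative definition is struct gve_device_option in gve_adminq.h, so treat the exact field list here as an approximation:

struct gve_device_option_sketch {
	__be16 option_id;		/* e.g. GVE_DEV_OPT_ID_FLOW_STEERING */
	__be16 option_length;		/* size of the body that follows */
	__be32 required_features_mask;	/* checked against GVE_DEV_OPT_REQ_FEAT_MASK_* */
	/* option_length bytes of option-specific data follow the header,
	 * which is why "*dev_op_flow_steering = (void *)(option + 1);"
	 * points at the gve_device_option_flow_steering body once the
	 * length and feature-mask checks above have passed.
	 */
};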
@@ -208,6 +226,7 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_flow_steering **dev_op_flow_steering,
struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
@@ -230,7 +249,7 @@ gve_process_device_options(struct gve_priv *priv,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
dev_op_dqo_qpl, dev_op_buffer_sizes,
- dev_op_modify_ring);
+ dev_op_flow_steering, dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -838,6 +857,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
*dev_op_dqo_qpl,
const struct gve_device_option_buffer_sizes
*dev_op_buffer_sizes,
+ const struct gve_device_option_flow_steering
+ *dev_op_flow_steering,
const struct gve_device_option_modify_ring
*dev_op_modify_ring)
{
@@ -890,10 +911,22 @@ static void gve_enable_supported_features(struct gve_priv *priv,
priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
}
}
+
+ if (dev_op_flow_steering &&
+ (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) {
+ if (dev_op_flow_steering->max_flow_rules) {
+ priv->max_flow_rules =
+ be32_to_cpu(dev_op_flow_steering->max_flow_rules);
+ dev_info(&priv->pdev->dev,
+ "FLOW STEERING device option enabled with max rule limit of %u.\n",
+ priv->max_flow_rules);
+ }
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
@@ -930,6 +963,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
&dev_op_gqi_qpl, &dev_op_dqo_rda,
&dev_op_jumbo_frames, &dev_op_dqo_qpl,
&dev_op_buffer_sizes,
+ &dev_op_flow_steering,
&dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -969,9 +1003,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
/* set default descriptor counts */
gve_set_default_desc_cnt(priv, descriptor);
- /* DQO supports LRO. */
if (!gve_is_gqi(priv))
- priv->dev->hw_features |= NETIF_F_LRO;
+ priv->dev->hw_features |= NETIF_F_LRO | NETIF_F_NTUPLE;
priv->max_registered_pages =
be64_to_cpu(descriptor->max_registered_pages);
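Advertising NETIF_F_NTUPLE in hw_features is what lets user space toggle receive flow steering; actually programming rules still depends on the follow-up ethtool plumbing. Illustrative user-space usage only, with the interface name assumed:

/* Once ntuple support is wired up, rules are managed from user space:
 *
 *	ethtool -K eth0 ntuple on
 *	ethtool -N eth0 flow-type tcp4 dst-port 80 action 2
 *
 * The second command steers TCP/IPv4 traffic with destination port 80
 * to RX queue 2.
 */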
@@ -991,7 +1024,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
- dev_op_buffer_sizes, dev_op_modify_ring);
+ dev_op_buffer_sizes, dev_op_flow_steering,
+ dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
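With the option parsed and handed to gve_enable_supported_features() above, priv->max_flow_rules now records the device limit. A minimal sketch of how a later rule-insertion path would be expected to enforce it, assuming a hypothetical num_flow_rules counter of already-installed rules (not part of this patch):

/* Hypothetical check at rule-insertion time. */
if (priv->num_flow_rules >= priv->max_flow_rules)
	return -ENOSPC;	/* the device cannot hold another rule */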
@@ -146,6 +146,14 @@ struct gve_device_option_modify_ring {
static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
+struct gve_device_option_flow_steering {
+ __be32 supported_features_mask;
+ __be32 reserved;
+ __be32 max_flow_rules; /* max number of flow steering rules the device supports */
+};
+
+static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
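The option body added above is three big-endian 32-bit words, which is what the static_assert pins at 12 bytes; consumers convert max_flow_rules to host order before use. A minimal sketch with a hypothetical helper, mirroring what gve_enable_supported_features() does inline:

/* Hypothetical helper: host-order rule limit carried by the option.
 * gve_enable_supported_features() skips enabling flow steering when
 * this value is zero.
 */
static u32 gve_flow_steering_rule_limit(const struct gve_device_option_flow_steering *opt)
{
	return be32_to_cpu(opt->max_flow_rules);
}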
@@ -163,6 +171,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
+ GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
};
enum gve_dev_opt_req_feat_mask {
@@ -174,12 +183,14 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
};
enum gve_sup_feature_mask {
GVE_SUP_MODIFY_RING_MASK = 1 << 0,
GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
+ GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
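For completeness, GVE_SUP_FLOW_STEERING_MASK is bit 5 of the descriptor's supported_features_mask (1 << 5 == 0x20), and the option is only honored when it is both present and advertised there, mirroring the gate in gve_enable_supported_features():

/* Both conditions must hold before max_flow_rules is recorded. */
bool flow_steering_ok = dev_op_flow_steering &&
			(supported_features_mask & GVE_SUP_FLOW_STEERING_MASK);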