@@ -7,6 +7,72 @@
#include "ena_devlink.h"
+static int ena_devlink_llq_header_validate(struct devlink *devlink, u32 id,
+					   union devlink_param_value val,
+					   struct netlink_ext_ack *extack);
+
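+/* Driver-specific devlink param IDs must start above the generic ID range,
+ * hence the DEVLINK_PARAM_GENERIC_ID_MAX base below.
+ */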
+enum ena_devlink_param_id {
+	ENA_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+	ENA_DEVLINK_PARAM_ID_LLQ_HEADER_SIZE,
+};
+
+static const struct devlink_param ena_devlink_params[] = {
+	DEVLINK_PARAM_DRIVER(ENA_DEVLINK_PARAM_ID_LLQ_HEADER_SIZE,
+			     "large_llq_header", DEVLINK_PARAM_TYPE_BOOL,
+			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+			     NULL, NULL, ena_devlink_llq_header_validate),
+};
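+
+/* The parameter is driven from userspace through devlink; for example
+ * (hypothetical PCI address, and a driverinit value only takes effect
+ * after a devlink reload):
+ *
+ *   devlink dev param set pci/0000:00:06.0 name large_llq_header \
+ *           value true cmode driverinit
+ *   devlink dev reload pci/0000:00:06.0
+ */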
+
+static int ena_devlink_llq_header_validate(struct devlink *devlink, u32 id,
+					   union devlink_param_value val,
+					   struct netlink_ext_ack *extack)
+{
+	struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+	bool value = val.vbool;
+
+	if (!value)
+		return 0;
+
+	if (adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+		NL_SET_ERR_MSG_MOD(extack, "Instance doesn't support LLQ");
+		return -EOPNOTSUPP;
+	}
+
+	if (!adapter->large_llq_header_supported) {
+		NL_SET_ERR_MSG_MOD(extack, "Instance doesn't support large LLQ");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
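+/* Reads the driverinit value of the large_llq_header param. driverinit
+ * values are only applied on device initialization or devlink reload,
+ * which is why this is called from the device init flow rather than when
+ * the parameter is set.
+ */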
+void ena_devlink_params_get(struct devlink *devlink)
+{
+	struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+	union devlink_param_value val;
+	int err;
+
+	err = devlink_param_driverinit_value_get(devlink,
+						 ENA_DEVLINK_PARAM_ID_LLQ_HEADER_SIZE,
+						 &val);
+	if (err) {
+		netdev_err(adapter->netdev, "Failed to query LLQ header size param\n");
+		return;
+	}
+
+	adapter->large_llq_header_enabled = val.vbool;
+}
+
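+/* Fallback path: called when large LLQ headers were requested but the
+ * device cannot provide them, clearing the stored driverinit value so
+ * that userspace sees the actual state.
+ */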
+void ena_devlink_disable_large_llq_header_param(struct devlink *devlink)
+{
+	union devlink_param_value value;
+
+	value.vbool = false;
+	devlink_param_driverinit_value_set(devlink,
+					   ENA_DEVLINK_PARAM_ID_LLQ_HEADER_SIZE,
+					   value);
+}
+
static int ena_devlink_reload_down(struct devlink *devlink,
				   bool netns_change,
				   enum devlink_reload_action action,
@@ -78,6 +144,29 @@ static const struct devlink_ops ena_devlink_ops = {
	.reload_up = ena_devlink_reload_up,
};
+static int ena_devlink_configure_params(struct devlink *devlink)
+{
+	struct ena_adapter *adapter = ENA_DEVLINK_PRIV(devlink);
+	union devlink_param_value value;
+	int rc;
+
+	rc = devlink_params_register(devlink, ena_devlink_params,
+				     ARRAY_SIZE(ena_devlink_params));
+	if (rc) {
+		netdev_err(adapter->netdev, "Failed to register devlink params\n");
+		return rc;
+	}
+
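+	/* Publish the driver's current default so that "devlink dev param
+	 * show" reports an accurate value before the user overrides it.
+	 */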
+	value.vbool = adapter->large_llq_header_enabled;
+	devlink_param_driverinit_value_set(devlink,
+					   ENA_DEVLINK_PARAM_ID_LLQ_HEADER_SIZE,
+					   value);
+
+	devlink_set_features(devlink, DEVLINK_F_RELOAD);
+
+	return 0;
+}
+
struct devlink *ena_devlink_alloc(struct ena_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
@@ -89,16 +178,29 @@ struct devlink *ena_devlink_alloc(struct ena_adapter *adapter)
		return NULL;
	}
-	devlink_set_features(devlink, DEVLINK_F_RELOAD);
-
	ENA_DEVLINK_PRIV(devlink) = adapter;
	adapter->devlink = devlink;
+	if (ena_devlink_configure_params(devlink))
+		goto free_devlink;
+
	return devlink;
+
+free_devlink:
+	devlink_free(devlink);
+
+	return NULL;
+}
+
+static void ena_devlink_configure_params_clean(struct devlink *devlink)
+{
+	devlink_params_unregister(devlink, ena_devlink_params,
+				  ARRAY_SIZE(ena_devlink_params));
}
void ena_devlink_free(struct devlink *devlink)
{
+	ena_devlink_configure_params_clean(devlink);
+
	devlink_free(devlink);
}
@@ -16,5 +16,7 @@ struct devlink *ena_devlink_alloc(struct ena_adapter *adapter);
void ena_devlink_free(struct devlink *devlink);
void ena_devlink_register(struct devlink *devlink, struct device *dev);
void ena_devlink_unregister(struct devlink *devlink);
+void ena_devlink_params_get(struct devlink *devlink);
+void ena_devlink_disable_large_llq_header_param(struct devlink *devlink);
#endif /* DEVLINK_H */
@@ -3385,13 +3385,30 @@ static int ena_device_validate_params(struct ena_adapter *adapter,
	return 0;
}
-static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+static void set_default_llq_configurations(struct ena_adapter *adapter,
+					   struct ena_llq_configurations *llq_config,
+					   struct ena_admin_feature_llq_desc *llq)
{
+	struct ena_com_dev *ena_dev = adapter->ena_dev;
+
	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
-	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
-	llq_config->llq_ring_entry_size_value = 128;
+
+	adapter->large_llq_header_supported =
+		!!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ));
+	adapter->large_llq_header_supported &=
+		!!(llq->entry_size_ctrl_supported &
+		   ENA_ADMIN_LIST_ENTRY_SIZE_256B);
+
+	if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
+	    adapter->large_llq_header_enabled) {
+		llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+		llq_config->llq_ring_entry_size_value = 256;
+	} else {
+		llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+		llq_config->llq_ring_entry_size_value = 128;
+	}
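+
+	/* Illustrative arithmetic (assuming the 16-byte ENA LLQ TX
+	 * descriptor): with ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 the
+	 * pushed header may occupy entry_size - 2 * 16 bytes, i.e. up to
+	 * 96 bytes with 128B entries and up to 224 bytes with 256B entries.
+	 */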
}
static int ena_set_queues_placement_policy(struct pci_dev *pdev,
@@ -3493,6 +3510,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
		goto err_mmio_read_less;
	}
+	ena_devlink_params_get(adapter->devlink);
+
	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
	if (rc) {
@@ -3533,7 +3552,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
-	set_default_llq_configurations(&llq_config);
+	set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq);
	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
					     &llq_config);
@@ -4212,6 +4231,26 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter,
	max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
	max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+	/* When forcing large headers, we multiply the entry size by 2,
+	 * and therefore divide the queue size by 2, leaving the amount
+	 * of memory used by the queues unchanged.
+	 */
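+	/* For example (illustrative numbers, not read from the device):
+	 * a 1024-entry queue of 128B entries and a 512-entry queue of
+	 * 256B entries both occupy 128KB.
+	 */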
+	if (adapter->large_llq_header_enabled) {
+		if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
+		    (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)) {
+			max_tx_queue_size /= 2;
+			dev_info(&adapter->pdev->dev,
+				 "Forcing large headers and decreasing maximum TX queue size to %d\n",
+				 max_tx_queue_size);
+		} else {
+			dev_err(&adapter->pdev->dev,
+				"Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
+
+			adapter->large_llq_header_enabled = false;
+			ena_devlink_disable_large_llq_header_param(adapter->devlink);
+		}
+	}
+
	tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
				  max_tx_queue_size);
	rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
@@ -336,6 +336,14 @@ struct ena_adapter {
	u32 msg_enable;
+	/* This field serves two purposes:
+	 * 1. Before device initialization: indicates that large LLQ headers
+	 *    have been requested.
+	 * 2. After device initialization / configuration: reflects whether
+	 *    large LLQ is actually in use.
+	 */
+	bool large_llq_header_enabled;
+	bool large_llq_header_supported;
+
	u16 max_tx_sgl_size;
	u16 max_rx_sgl_size;