@@ -312,6 +312,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
.eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
+ .eswitch_spool_size_set = mlx5_devlink_eswitch_spool_size_set,
+ .eswitch_spool_size_get = mlx5_devlink_eswitch_spool_size_get,
.rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set,
.rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set,
.rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set,
@@ -378,6 +378,8 @@ struct mlx5_eswitch {
struct mlx5_esw_functions esw_funcs;
struct {
u32 large_group_num;
+ u32 shared_rx_ring_counts;
+ bool enable_shared_rx_ring;
} params;
struct blocking_notifier_head n_head;
struct xarray paired;
@@ -549,6 +551,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
enum devlink_eswitch_encap_mode *encap);
+int mlx5_devlink_eswitch_spool_size_set(struct devlink *devlink, u32 spool_size,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_eswitch_spool_size_get(struct devlink *devlink, u32 *spool_size);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
u8 *hw_addr, int *hw_addr_len,
struct netlink_ext_ack *extack);
@@ -4019,6 +4019,55 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
return 0;
}
+int mlx5_devlink_eswitch_spool_size_set(struct devlink *devlink,
+ u32 spool_size,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw;
+ bool enable;
+ int err = 0;
+ int counts;
+
+ esw = mlx5_devlink_eswitch_get(devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ down_write(&esw->mode_lock);
+ if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't set shared pool size in switchdev mode");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ counts = spool_size >> PAGE_SHIFT;
+ enable = counts != 0;
+ esw->params.enable_shared_rx_ring = enable;
+ esw->params.shared_rx_ring_counts = enable ? counts : 0;
+
+out:
+ up_write(&esw->mode_lock);
+ return err;
+}
+
+int mlx5_devlink_eswitch_spool_size_get(struct devlink *devlink,
+ u32 *spool_size)
+{
+ struct mlx5_eswitch *esw;
+ bool enable;
+
+ esw = mlx5_devlink_eswitch_get(devlink);
+ if (IS_ERR(esw))
+ return PTR_ERR(esw);
+
+ enable = esw->params.enable_shared_rx_ring;
+ if (enable)
+ *spool_size = esw->params.shared_rx_ring_counts << PAGE_SHIFT;
+ else
+ *spool_size = 0;
+
+ return 0;
+}
+
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
Add devlink spool_size attribute support for the eswitch shared memory
pool. It is used to configure the size of the shared memory pool for
the eswitch.

Signed-off-by: William Tu <witu@nvidia.com>
---
 .../net/ethernet/mellanox/mlx5/core/devlink.c |  2 +
 .../net/ethernet/mellanox/mlx5/core/eswitch.h |  5 ++
 .../mellanox/mlx5/core/eswitch_offloads.c     | 49 +++++++++++++++++++
 3 files changed, 56 insertions(+)
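
For context, a minimal sketch of how a later consumer in the driver could
read these parameters back when sizing the shared RX ring pool; the helper
name mlx5_esw_spool_size_bytes is hypothetical and not part of this patch:

/* Illustration only, not part of this patch: return the configured shared
 * pool size in bytes, mirroring the page-granular storage used by
 * mlx5_devlink_eswitch_spool_size_set()/_get().
 */
static u32 mlx5_esw_spool_size_bytes(const struct mlx5_eswitch *esw)
{
	if (!esw->params.enable_shared_rx_ring)
		return 0;

	/* shared_rx_ring_counts holds the pool size in pages */
	return esw->params.shared_rx_ring_counts << PAGE_SHIFT;
}

Note that because the set callback converts bytes to pages with
spool_size >> PAGE_SHIFT, any requested size below PAGE_SIZE rounds down
to zero and disables the pool, and the get callback reports the size
rounded down to a multiple of PAGE_SIZE.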