@@ -1532,9 +1532,9 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw)
esw->qos.enabled = false;
}
-static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport,
- u32 initial_max_rate, u32 initial_bw_share)
+int mlx5_eswitch_vport_enable_qos(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ u32 initial_max_rate, u32 initial_bw_share)
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
@@ -1573,8 +1573,8 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
return 0;
}
-static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+void mlx5_eswitch_vport_disable_qos(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int err;
@@ -1795,8 +1795,8 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
goto done;
/* Attach vport to the eswitch rate limiter */
- if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
- vport->qos.bw_share))
+ if (mlx5_eswitch_vport_enable_qos(esw, vport, vport->info.max_rate,
+ vport->qos.bw_share))
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
/* Sync with current vport context */
@@ -1840,7 +1840,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
- esw_vport_disable_qos(esw, vport);
+ mlx5_eswitch_vport_disable_qos(esw, vport);
if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
esw->mode == MLX5_ESWITCH_LEGACY)
@@ -298,6 +298,13 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
u16 vport,
struct ifla_vf_stats *vf_stats);
+int mlx5_eswitch_vport_enable_qos(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ u32 initial_max_rate, u32 initial_bw_share);
+void mlx5_eswitch_vport_disable_qos(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+int mlx5_eswitch_setup_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_eswitch_cleanup_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
@@ -1624,6 +1624,117 @@ static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw)
return err;
}
+static int esw_offloads_load_vport_reps(struct mlx5_eswitch *esw, u16 vport_num) /* Load every representor type for one vport; 0 on success, -errno on failure. */
+{
+ struct mlx5_eswitch_rep *rep;
+ u8 rep_type;
+ int err;
+
+ rep = mlx5_eswitch_get_rep(esw, vport_num);
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { /* all rep types must load or none do */
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
+ if (err) {
+ esw_warn(esw->dev, "Load vport(%d) rep type(%d) err!\n",
+ vport_num, rep_type);
+ goto err_reps; /* unwind the types already loaded */
+ }
+ }
+
+ return 0;
+
+err_reps:
+ while (rep_type-- > 0) /* post-decrement skips the failed type itself; unloads 0..rep_type-1 in reverse */
+ __esw_offloads_unload_rep(esw, rep, rep_type);
+ return err;
+}
+
+static void
+esw_offloads_unload_vport_reps(struct mlx5_eswitch *esw, u16 vport_num) /* Unload all representor types of one vport, reverse of esw_offloads_load_vport_reps(). */
+{
+ struct mlx5_eswitch_rep *rep;
+ u8 rep_type = NUM_REP_TYPES; /* start one past the last type; loop below pre-decrements */
+
+ rep = mlx5_eswitch_get_rep(esw, vport_num);
+ while (rep_type-- > 0) /* unload in reverse of load order */
+ __esw_offloads_unload_rep(esw, rep, rep_type);
+}
+
<![CDATA[
+static int
+esw_enable_sf_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport) /* Bring up an SF vport: ACL tables, MAC/GUID, QoS; 0 or -errno. */
+{
+ int ret;
+
+ ret = esw_vport_create_offloads_acl_tables(esw, vport); /* created outside state_lock; destroyed outside it on error, keeping lock scope symmetric with esw_disable_sf_vport() */
+ if (ret)
+ return ret;
+
+ mutex_lock(&esw->state_lock); /* serializes vport state changes across the eswitch */
+
+ mlx5_modify_nic_vport_mac_address(esw->dev, vport->vport, vport->info.mac); /* push cached MAC to FW */
+ mlx5_modify_nic_vport_node_guid(esw->dev, vport->vport,
+ vport->info.node_guid);
+
+ /* Attach vport to the eswitch rate limiter */
+ ret = mlx5_eswitch_vport_enable_qos(esw, vport, vport->info.max_rate,
+ vport->qos.bw_share);
+ if (ret)
+ goto qos_err; /* QoS attach failure is fatal here (unlike esw_enable_vport, which only warns) */
+
+ vport->enabled = true; /* mark enabled only after all setup succeeded */
+ esw_debug(esw->dev, "Enabled SF vport(0x%x)\n", vport->vport);
+
+ mutex_unlock(&esw->state_lock);
+ return 0;
+
+qos_err:
+ mutex_unlock(&esw->state_lock); /* drop lock before tearing down ACL tables, mirroring setup order */
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
+ return ret;
+}
]]>
+
+static void
+esw_disable_sf_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport) /* Tear down an SF vport: QoS detach under state_lock, then ACL tables; reverse of esw_enable_sf_vport(). */
+{
+ mutex_lock(&esw->state_lock);
+
+ esw_debug(esw->dev, "Disabling vport(0x%x)\n", vport->vport);
+ vport->enabled = false; /* clear before detaching QoS so the vport is no longer considered active */
+ mlx5_eswitch_vport_disable_qos(esw, vport);
+
+ mutex_unlock(&esw->state_lock);
+
+ esw_vport_destroy_offloads_acl_tables(esw, vport); /* outside the lock, matching where enable created them */
+}
+
+int mlx5_eswitch_setup_sf_vport(struct mlx5_eswitch *esw, u16 vport_num) /* Public entry: enable an SF vport and load its representors; 0 or -errno. */
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+ int ret;
+
+ if (IS_ERR(vport)) /* invalid vport number */
+ return PTR_ERR(vport);
+
+ ret = esw_enable_sf_vport(esw, vport);
+ if (ret)
+ return ret;
+
+ ret = esw_offloads_load_vport_reps(esw, vport_num);
+ if (ret)
+ esw_disable_sf_vport(esw, vport); /* roll back the enable if rep loading fails */
+ return ret;
+}
+
+void mlx5_eswitch_cleanup_sf_vport(struct mlx5_eswitch *esw, u16 vport_num) /* Public entry: undo mlx5_eswitch_setup_sf_vport() — unload reps, then disable the vport. */
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (IS_ERR(vport)) /* nothing to clean up for an invalid vport number */
+ return;
+
+ esw_offloads_unload_vport_reps(esw, vport_num); /* reverse order of setup: reps first, then vport */
+ esw_disable_sf_vport(esw, vport);
+}
+
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
@@ -7,6 +7,7 @@
#include <linux/bitmap.h>
#include "sf.h"
#include "mlx5_core.h"
+#include "eswitch.h"
static int
mlx5_cmd_query_sf_partitions(struct mlx5_core_dev *mdev, u32 *out, int outlen)
@@ -149,11 +150,17 @@ mlx5_sf_alloc(struct mlx5_core_dev *coredev, struct mlx5_sf_table *sf_table,
if (ret)
goto enable_err;
+ ret = mlx5_eswitch_setup_sf_vport(coredev->priv.eswitch, hw_function_id);
+ if (ret)
+ goto vport_err;
+
sf->idx = sf_id;
sf->base_addr = sf_table->base_address +
(sf->idx << (sf_table->log_sf_bar_size + 12));
return sf;
+vport_err:
+ mlx5_core_disable_sf_hca(coredev, hw_function_id);
enable_err:
mlx5_cmd_dealloc_sf(coredev, hw_function_id);
alloc_sf_err:
@@ -169,6 +176,7 @@ void mlx5_sf_free(struct mlx5_core_dev *coredev, struct mlx5_sf_table *sf_table,
u16 hw_function_id;
hw_function_id = mlx5_sf_hw_id(coredev, sf->idx);
+ mlx5_eswitch_cleanup_sf_vport(coredev->priv.eswitch, hw_function_id);
mlx5_core_disable_sf_hca(coredev, hw_function_id);
mlx5_cmd_dealloc_sf(coredev, hw_function_id);
free_sf_id(sf_table, sf->idx);