@@ -755,8 +755,10 @@ static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
mlx5_eswitch_unblock_mode_lock(ipsec->mdev);
- if (tx == ipsec->tx_esw)
+ if (tx == ipsec->tx_esw) {
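+ /* Restore diverted TC rules to the uplink before the policy table is destroyed */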
+ mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
+ }
tx_destroy(ipsec, tx, ipsec->roce);
@@ -5,6 +5,9 @@
#include "eswitch.h"
#include "en_accel/ipsec.h"
#include "esw/ipsec_fs.h"
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+#include "en/tc_priv.h"
+#endif
enum {
MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
@@ -267,3 +270,56 @@ void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
attr->cnt_level = MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL;
attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
}
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
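+/*
+ * Restore the original uplink vport destination of a TC flow that was
+ * diverted to the IPsec TX policy table. Flows with more than one
+ * non-split destination are never diverted (see
+ * esw_flow_dests_fwd_ipsec_check()) and are skipped here.
+ */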
+static int mlx5_esw_ipsec_modify_flow_dests(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_attr *attr;
+
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ if (esw_attr->out_count - esw_attr->split_count > 1)
+ return 0;
+
+ return mlx5_eswitch_restore_ipsec_rule(esw, flow->rule[0], esw_attr,
+ esw_attr->out_count - 1);
+}
+#endif
+
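+/*
+ * Walk the TC flow tables of all vport representors and restore every
+ * rule that currently forwards to the IPsec TX policy table so that it
+ * forwards to the uplink vport again.
+ */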
+void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
+{
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep;
+ struct mlx5e_rep_priv *rpriv;
+ struct rhashtable_iter iter;
+ struct mlx5e_tc_flow *flow;
+ unsigned long i;
+ int err;
+
+ xa_for_each(&esw->offloads.vport_reps, i, rep) {
+ rpriv = rep->rep_data[REP_ETH].priv;
+ if (!rpriv || !rpriv->netdev)
+ continue;
+
+ rhashtable_walk_enter(&rpriv->tc_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while ((flow = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(flow))
+ continue;
+
+ err = mlx5_esw_ipsec_modify_flow_dests(esw, flow);
+ if (err)
+ mlx5_core_warn_once(mdev,
+ "Faided to modify flow dests for IPsec");
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+ }
+#endif
+}
@@ -24,6 +24,7 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
u32 *ipsec_obj_id);
void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr);
+void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
#else
static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx) {}
@@ -60,5 +61,7 @@ static inline int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv,
static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_tx_create_attr *attr) {}
+
+static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESW_IPSEC_FS_H__ */
@@ -811,6 +811,8 @@ mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
return esw->fdb_table.offloads.slow_fdb;
}
+int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
@@ -424,10 +424,51 @@ esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 l
mlx5_chains_put_table(chains, chain, prio, level);
}
+static bool esw_same_vhca_id(struct mlx5_core_dev *mdev1, struct mlx5_core_dev *mdev2)
+{
+ return MLX5_CAP_GEN(mdev1, vhca_id) == MLX5_CAP_GEN(mdev2, vhca_id);
+}
+
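+/*
+ * A destination must be diverted through the IPsec TX policy table when
+ * the table exists and the destination is the uplink of the local vhca,
+ * is reached through an encap (tunnel) device and differs from the
+ * source rep.
+ */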
+static bool esw_setup_uplink_fwd_ipsec_needed(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx)
+{
+ if (esw->offloads.ft_ipsec_tx_pol &&
+ esw_attr->dests[attr_idx].rep &&
+ esw_attr->dests[attr_idx].rep->vport == MLX5_VPORT_UPLINK &&
+ /* To be aligned with software, encryption is needed only for tunnel devices */
+ (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) &&
+ esw_attr->dests[attr_idx].rep != esw_attr->in_rep &&
+ esw_same_vhca_id(esw_attr->dests[attr_idx].mdev, esw->dev))
+ return true;
+
+ return false;
+}
+
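+/*
+ * Refuse destination combinations the IPsec diversion cannot handle: a
+ * diverted destination in the split (mirror) part of the rule, or a
+ * diverted destination next to other forward destinations.
+ */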
+static bool esw_flow_dests_fwd_ipsec_check(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *esw_attr)
+{
+ int i;
+
+ if (!esw->offloads.ft_ipsec_tx_pol)
+ return true;
+
+ for (i = 0; i < esw_attr->split_count; i++)
+ if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i))
+ return false;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, i) &&
+ (esw_attr->out_count - esw_attr->split_count > 1))
+ return false;
+
+ return true;
+}
+
static void
-esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
- struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
- int attr_idx, int dest_idx, bool pkt_reformat)
+esw_setup_dest_fwd_vport(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
{
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
@@ -449,6 +490,33 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
}
}
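+/*
+ * Divert the destination to the IPsec TX policy flow table instead of
+ * the uplink vport, keeping the packet reformat (encap) action when the
+ * destination carries a valid one.
+ */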
+static void
+esw_setup_dest_fwd_ipsec(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
+{
+ dest[dest_idx].ft = esw->offloads.ft_ipsec_tx_pol;
+ dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ if (pkt_reformat &&
+ esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+ }
+}
+
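+/*
+ * Per-destination dispatch: divert through the IPsec TX policy table
+ * when needed, otherwise set up a plain vport destination.
+ */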
+static void
+esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
+{
+ if (esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
+ esw_setup_dest_fwd_ipsec(dest, flow_act, esw, esw_attr,
+ attr_idx, dest_idx, pkt_reformat);
+ else
+ esw_setup_dest_fwd_vport(dest, flow_act, esw, esw_attr,
+ attr_idx, dest_idx, pkt_reformat);
+}
+
static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
@@ -575,6 +643,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
return ERR_PTR(-EOPNOTSUPP);
+ if (!esw_flow_dests_fwd_ipsec_check(esw, esw_attr))
+ return ERR_PTR(-EOPNOTSUPP);
+
dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
if (!dest)
return ERR_PTR(-ENOMEM);
@@ -4374,3 +4445,19 @@ int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
mutex_unlock(&esw->state_lock);
return err;
}
+
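+/*
+ * Swap a rule's destination from the IPsec TX policy table back to the
+ * plain vport destination. Both setup helpers are invoked with a NULL
+ * flow_act and pkt_reformat disabled as only the destination changes.
+ */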
+int
+mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
+ struct mlx5_esw_flow_attr *esw_attr, int attr_idx)
+{
+ struct mlx5_flow_destination new_dest = {};
+ struct mlx5_flow_destination old_dest = {};
+
+ if (!esw_setup_uplink_fwd_ipsec_needed(esw, esw_attr, attr_idx))
+ return 0;
+
+ esw_setup_dest_fwd_ipsec(&old_dest, NULL, esw, esw_attr, attr_idx, 0, false);
+ esw_setup_dest_fwd_vport(&new_dest, NULL, esw, esw_attr, attr_idx, 0, false);
+
+ return mlx5_modify_rule_destination(rule, &new_dest, &old_dest);
+}