@@ -715,9 +715,20 @@ void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
uplink_priv = &uplink_rpriv->uplink_priv;
ct_priv = uplink_priv->ct_priv;
- if (!mlx5_ipsec_is_rx_flow(cqe) &&
- !mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, zone_restore_id, tunnel_id,
- &tc_priv))
+#ifdef CONFIG_MLX5_EN_IPSEC
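+ /* With no tunnel mapping (the tun_id bits of tunnel_id are 0), the low
+  * ESW_TUN_OPTS_BITS may carry the IPsec mapped id written by
+  * mlx5_esw_ipsec_rx_setup_modify_header().
+  */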
+ if (!(tunnel_id >> ESW_TUN_OPTS_BITS)) {
+ u32 mapped_id;
+ u32 metadata;
+
+ mapped_id = tunnel_id & ESW_IPSEC_RX_MAPPED_ID_MASK;
+ if (mapped_id &&
+ !mlx5_esw_ipsec_rx_make_metadata(priv, mapped_id, &metadata))
+ mlx5e_ipsec_offload_handle_rx_skb(priv->netdev, skb, metadata);
+ }
+#endif
+
+ if (!mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv,
+ zone_restore_id, tunnel_id, &tc_priv))
goto free_skb;
forward:
@@ -210,6 +210,7 @@ struct mlx5e_ipsec_rx {
struct mlx5e_ipsec_fc *fc;
struct mlx5_fs_chains *chains;
u8 allow_tunnel_mode : 1;
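+ /* compact RX mapped id -> ipsec_obj_id, used in switchdev mode */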
+ struct xarray ipsec_obj_id_map;
};
struct mlx5e_ipsec {
@@ -256,6 +257,7 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_work *work;
struct mlx5e_ipsec_dwork *dwork;
struct mlx5e_ipsec_limits limits;
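+ /* nonzero when a compact RX mapped id has been allocated for this SA */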
+ u32 rx_mapped_id;
};
struct mlx5_accel_pol_xfrm_attrs {
@@ -1153,9 +1153,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
- err = setup_modify_header(ipsec, attrs->type,
-   sa_entry->ipsec_obj_id | BIT(31),
-   XFRM_DEV_OFFLOAD_IN, &flow_act);
+ if (rx != ipsec->rx_esw)
+   err = setup_modify_header(ipsec, attrs->type,
+     sa_entry->ipsec_obj_id | BIT(31),
+     XFRM_DEV_OFFLOAD_IN, &flow_act);
+ else
+   err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
+
if (err)
goto err_mod_header;
@@ -1641,6 +1644,7 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
}
mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
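+ /* release the eswitch RX mapped id, if one was allocated for this SA */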
+ mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}
@@ -1693,6 +1697,8 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
kfree(ipsec->rx_ipv6);
if (ipsec->is_uplink_rep) {
+ xa_destroy(&ipsec->rx_esw->ipsec_obj_id_map);
+
mutex_destroy(&ipsec->tx_esw->ft.mutex);
WARN_ON(ipsec->tx_esw->ft.refcnt);
kfree(ipsec->tx_esw);
@@ -1753,6 +1759,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
mutex_init(&ipsec->tx_esw->ft.mutex);
mutex_init(&ipsec->rx_esw->ft.mutex);
ipsec->tx_esw->ns = ns_esw;
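+ /* XA_FLAGS_ALLOC1 starts ids at 1 so that rx_mapped_id == 0 means "unmapped" */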
+ xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
}
@@ -37,6 +37,7 @@
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en.h"
+#include "esw/ipsec_fs.h"
enum {
MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
@@ -355,3 +356,24 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
}
}
+
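+/* Rebuild the metadata word that mlx5e_ipsec_offload_handle_rx_skb() expects
+ * from the mapped id recovered in mlx5e_rep_tc_receive().
+ */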
+int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ u32 ipsec_obj_id;
+ int err;
+
+ if (!ipsec || !ipsec->is_uplink_rep)
+ return -EINVAL;
+
+ err = mlx5_esw_ipsec_rx_ipsec_obj_id_search(priv, id, &ipsec_obj_id);
+ if (err) {
+ atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+ return err;
+ }
+
+ *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
+ MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
+
+ return 0;
+}
@@ -43,6 +43,7 @@
#define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1)
#define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
#define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0))
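+/* inverse of the getters above: object id in bits 23:0, syndrome in bits 29:24 */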
+#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))
struct mlx5e_accel_tx_ipsec_state {
struct xfrm_offload *xo;
@@ -67,6 +68,7 @@ void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
u32 ipsec_meta_data);
+int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata);
static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
return ipsec_st->tailen;
@@ -182,3 +182,72 @@ int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
return 0;
}
+
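+/* Write the compact mapped id into the tun_opts bits of REG_C_1 so the
+ * uplink rep RX path can recover the SA after decryption in switchdev mode.
+ */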
+int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_act *flow_act)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_modify_hdr *modify_hdr;
+ u32 mapped_id;
+ int err;
+
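+ /* ipsec_obj_id does not necessarily fit in the ESW_TUN_OPTS_BITS-wide
+  * tun_opts field of REG_C_1, so map it to a compact id and keep the
+  * full object id as the xarray value.
+  */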
+ err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
+ xa_mk_value(sa_entry->ipsec_obj_id),
+ XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
+ if (err)
+ return err;
+
+ /* Reuse the tunnel bits of REG_C_1 for IPsec: tun_id stays 0 and
+  * tun_opts carries the mapped id that resolves to ipsec_obj_id.
+  */
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+ MLX5_SET(set_action_in, action, offset, ESW_ZONE_ID_BITS);
+ MLX5_SET(set_action_in, action, length,
+ ESW_TUN_ID_BITS + ESW_TUN_OPTS_BITS);
+ MLX5_SET(set_action_in, action, data, mapped_id);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ goto err_header_alloc;
+ }
+
+ sa_entry->rx_mapped_id = mapped_id;
+ flow_act->modify_hdr = modify_hdr;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return 0;
+
+err_header_alloc:
+ xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
+ return err;
+}
+
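+/* Release the mapped id allocated by mlx5_esw_ipsec_rx_setup_modify_header(), if any. */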
+void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+
+ if (sa_entry->rx_mapped_id)
+ xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
+ sa_entry->rx_mapped_id);
+}
+
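+/* Translate a mapped id back to the full ipsec_obj_id stored in the xarray. */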
+int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+ u32 *ipsec_obj_id)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ void *val;
+
+ val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
+ if (!val)
+ return -ENOENT;
+
+ *ipsec_obj_id = xa_to_value(val);
+
+ return 0;
+}
@@ -5,6 +5,7 @@
#define __MLX5_ESW_IPSEC_FS_H__
struct mlx5e_ipsec;
+struct mlx5e_ipsec_sa_entry;
#ifdef CONFIG_MLX5_ESWITCH
void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
@@ -16,6 +17,11 @@ void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx_create_attr *attr);
int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
struct mlx5_flow_destination *dest);
+int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_act *flow_act);
+void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+ u32 *ipsec_obj_id);
#else
static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx) {}
@@ -35,5 +41,19 @@ static inline int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ips
{
return -EINVAL;
}
+
+static inline int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_act *flow_act)
+{
+ return -EINVAL;
+}
+
+static inline void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) {}
+
+static inline int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+ u32 *ipsec_obj_id)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_ESW_IPSEC_FS_H__ */
@@ -144,6 +144,9 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
ESW_TUN_OPTS_OFFSET + 1)
+/* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */
+#define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
+
u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);