@@ -75,6 +75,10 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o

+ifneq ($(CONFIG_MLX5_EN_IPSEC),)
+ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/ipsec_fs.o
+endif
+
mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o esw/bridge_debugfs.o \
en/rep/bridge.o
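
Note: esw/ipsec_fs.o is appended to the CONFIG_MLX5_ESWITCH object list, but only inside the ifneq guard, so the file is compiled just when CONFIG_MLX5_EN_IPSEC is also set; testing for a non-empty value (rather than comparing against y) keeps the guard correct for any enabled configuration.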
@@ -143,7 +143,7 @@ struct mlx5e_ipsec_sw_stats {
atomic64_t ipsec_tx_drop_trailer;
};

-struct mlx5e_ipsec_rx;
+struct mlx5e_ipsec_fc;
struct mlx5e_ipsec_tx;

struct mlx5e_ipsec_work {
@@ -180,6 +180,38 @@ struct mlx5e_ipsec_rx_create_attr {
enum mlx5_flow_namespace_type chains_ns;
};

+struct mlx5e_ipsec_ft {
+ struct mutex mutex; /* Protect changes to this struct */
+ struct mlx5_flow_table *pol;
+ struct mlx5_flow_table *sa;
+ struct mlx5_flow_table *status;
+ u32 refcnt;
+};
+
+struct mlx5e_ipsec_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ struct mlx5_fc *fc;
+};
+
+struct mlx5e_ipsec_miss {
+ struct mlx5_flow_group *group;
+ struct mlx5_flow_handle *rule;
+};
+
+struct mlx5e_ipsec_rx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_miss sa;
+ struct mlx5e_ipsec_rule status;
+ struct mlx5e_ipsec_miss status_drop;
+ struct mlx5_fc *status_drop_cnt;
+ struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fs_chains *chains;
+ u8 allow_tunnel_mode : 1;
+};
+
struct mlx5e_ipsec {
struct mlx5_core_dev *mdev;
struct xarray sadb;
@@ -205,13 +237,6 @@ struct mlx5e_ipsec_esn_state {
u8 overlap: 1;
};

-struct mlx5e_ipsec_rule {
- struct mlx5_flow_handle *rule;
- struct mlx5_modify_hdr *modify_hdr;
- struct mlx5_pkt_reformat *pkt_reformat;
- struct mlx5_fc *fc;
-};
-
struct mlx5e_ipsec_limits {
u64 round;
u8 soft_limit_hit : 1;
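
For context: this hunk moves mlx5e_ipsec_ft, mlx5e_ipsec_rule, mlx5e_ipsec_miss and mlx5e_ipsec_rx out of ipsec_fs.c into the header (the matching removals appear in later hunks) so the new esw/ipsec_fs.c can see them, and swaps the now-redundant forward declaration of mlx5e_ipsec_rx for one of mlx5e_ipsec_fc, which stays private to ipsec_fs.c. The ft.mutex/ft.refcnt pair backs the driver's existing get/put pattern around table creation; a minimal sketch of that pattern, assuming the rx_create() helper patched below (the function name ipsec_rx_get_sketch is hypothetical):

	static struct mlx5e_ipsec_rx *
	ipsec_rx_get_sketch(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
			    struct mlx5e_ipsec_rx *rx, u32 family)
	{
		int err = 0;

		mutex_lock(&rx->ft.mutex);
		if (!rx->ft.refcnt)
			/* first user builds the pol/sa/status tables */
			err = rx_create(mdev, ipsec, rx, family);
		if (!err)
			rx->ft.refcnt++; /* later users just take a reference */
		mutex_unlock(&rx->ft.mutex);

		return err ? ERR_PTR(err) : rx;
	}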
@@ -9,6 +9,7 @@
#include "fs_core.h"
#include "lib/ipsec_fs_roce.h"
#include "lib/fs_chains.h"
+#include "esw/ipsec_fs.h"

#define NUM_IPSEC_FTE BIT(15)
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
@@ -19,29 +20,6 @@ struct mlx5e_ipsec_fc {
struct mlx5_fc *drop;
};

-struct mlx5e_ipsec_ft {
- struct mutex mutex; /* Protect changes to this struct */
- struct mlx5_flow_table *pol;
- struct mlx5_flow_table *sa;
- struct mlx5_flow_table *status;
- u32 refcnt;
-};
-
-struct mlx5e_ipsec_miss {
- struct mlx5_flow_group *group;
- struct mlx5_flow_handle *rule;
-};
-
-struct mlx5e_ipsec_rx {
- struct mlx5e_ipsec_ft ft;
- struct mlx5e_ipsec_miss pol;
- struct mlx5e_ipsec_miss sa;
- struct mlx5e_ipsec_rule status;
- struct mlx5e_ipsec_fc *fc;
- struct mlx5_fs_chains *chains;
- u8 allow_tunnel_mode : 1;
-};
-
struct mlx5e_ipsec_tx {
struct mlx5e_ipsec_ft ft;
struct mlx5e_ipsec_miss pol;
@@ -259,9 +237,9 @@ static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
struct mlx5e_ipsec_rx *rx, u32 family)
{
-
/* disconnect */
- ipsec_rx_ft_disconnect(ipsec, family);
+ if (rx != ipsec->rx_esw)
+ ipsec_rx_ft_disconnect(ipsec, family);

if (rx->chains) {
ipsec_chains_destroy(rx->chains);
@@ -276,8 +254,12 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_destroy_flow_table(rx->ft.sa);
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+ if (rx == ipsec->rx_esw) {
+ mlx5_esw_ipsec_rx_status_destroy(ipsec, rx);
+ } else {
+ mlx5_del_flow_rules(rx->status.rule);
+ mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+ }
mlx5_destroy_flow_table(rx->ft.status);

mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
@@ -288,6 +270,13 @@ static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
u32 family,
struct mlx5e_ipsec_rx_create_attr *attr)
{
+ if (rx == ipsec->rx_esw) {
+ /* For packet offload in switchdev mode, RX & TX use FDB namespace */
+ attr->ns = ipsec->tx_esw->ns;
+ mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
+ return;
+ }
+
attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
attr->family = family;
@@ -306,6 +295,9 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
struct mlx5_flow_table *ft;
int err;

+ if (rx == ipsec->rx_esw)
+ return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);
+
*dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
@@ -357,7 +349,10 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
- err = ipsec_status_rule(mdev, rx, dest);
+ if (rx == ipsec->rx_esw)
+ err = mlx5_esw_ipsec_rx_status_create(ipsec, rx, dest);
+ else
+ err = ipsec_status_rule(mdev, rx, dest);
if (err)
goto err_add;
@@ -406,7 +401,8 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
connect:
/* connect */
- ipsec_rx_ft_connect(ipsec, rx, &attr);
+ if (rx != ipsec->rx_esw)
+ ipsec_rx_ft_connect(ipsec, rx, &attr);
return 0;

err_pol_miss:
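
Taken together, the rx_destroy()/rx_create() changes keep TTC connect/disconnect as a NIC-only step: the eswitch tables live in the FDB and are entered from the FDB_CRYPTO_INGRESS priority instead. A rough sketch of the resulting uplink RX pipeline, assembled from this patch (illustrative only):

	/*
	 * FDB_CRYPTO_INGRESS priority, three table levels:
	 *
	 *   ft.pol    (MLX5_ESW_IPSEC_RX_POL_FT_LEVEL)     xfrm policy rules; allowed
	 *                                                  packets and the pol miss rule
	 *                                                  continue to ft.sa
	 *   ft.sa     (MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL)     SA match + decrypt -> ft.status
	 *   ft.status (MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL) decrypt status check:
	 *       ipsec_syndrome == 0 -> count, pass to eswitch chain 0, prio 1
	 *       any other syndrome  -> last-FTE catch-all group: count + drop
	 */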
@@ -864,18 +860,22 @@ static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upsp
}
}

-static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec, u8 dir)
+static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
+ int type, u8 dir)
{
+ if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
+ return MLX5_FLOW_NAMESPACE_FDB;
+
if (dir == XFRM_DEV_OFFLOAD_IN)
return MLX5_FLOW_NAMESPACE_KERNEL;
return MLX5_FLOW_NAMESPACE_EGRESS;
}

-static int setup_modify_header(struct mlx5e_ipsec *ipsec, u32 val, u8 dir,
+static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
struct mlx5_flow_act *flow_act)
{
- enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, dir);
+ enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_modify_hdr *modify_hdr;
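
The new type argument only changes the answer for the uplink representor: packet offload there must steer in the FDB, while crypto offload and the plain NIC mode keep the KERNEL (RX) and EGRESS (TX) namespaces as before.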
@@ -1085,7 +1085,8 @@ static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5_flow_act *flow_act)
{
- enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->dir);
+ enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
+ attrs->dir);
struct mlx5_pkt_reformat_params reformat_params = {};
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_pkt_reformat *pkt_reformat;
@@ -1127,7 +1128,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
struct mlx5_flow_spec *spec;
struct mlx5e_ipsec_rx *rx;
struct mlx5_fc *counter;
- int err;
+ int err = 0;

rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
if (IS_ERR(rx))
@@ -1148,8 +1149,10 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
setup_fte_esp(spec);
setup_fte_no_frags(spec);

- err = setup_modify_header(ipsec, sa_entry->ipsec_obj_id | BIT(31),
- XFRM_DEV_OFFLOAD_IN, &flow_act);
+ if (rx != ipsec->rx_esw)
+ err = setup_modify_header(ipsec, attrs->type,
+ sa_entry->ipsec_obj_id | BIT(31),
+ XFRM_DEV_OFFLOAD_IN, &flow_act);
if (err)
goto err_mod_header;
@@ -1340,7 +1343,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
if (!attrs->reqid)
break;

- err = setup_modify_header(ipsec, attrs->reqid,
+ err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
XFRM_DEV_OFFLOAD_OUT, &flow_act);
if (err)
goto err_mod_header;
@@ -1388,6 +1391,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+ struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
struct mlx5_flow_destination dest[2];
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
@@ -1433,6 +1437,8 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
}

flow_act.flags |= FLOW_ACT_NO_APPEND;
+ if (rx == ipsec->rx_esw && rx->chains)
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[dstn].ft = rx->ft.sa;
dstn++;
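
FLOW_ACT_IGNORE_FLOW_LEVEL is set here because, with the chains infrastructure in the FDB, the policy table the rule sits in cannot be assumed to occupy a lower flow level than its rx->ft.sa destination, so the hardware level-ordering check is relaxed for this rule only. This is presumably also why the capability change below accepts the FDB's ignore_flow_level bit.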
@@ -45,8 +45,9 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

- if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level))
+ if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
caps |= MLX5_IPSEC_CAP_PRIO;

if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
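
With this change MLX5_IPSEC_CAP_PRIO is advertised when either the NIC RX/TX tables or the FDB support ignore_flow_level, so switchdev-mode devices report policy-priority support even if the NIC-side capability is absent.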
new file mode 100644
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "fs_core.h"
+#include "eswitch.h"
+#include "en_accel/ipsec.h"
+#include "esw/ipsec_fs.h"
+
+enum {
+ MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
+ MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
+ MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
+};
+
+static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->status_drop.rule);
+ mlx5_destroy_flow_group(rx->status_drop.group);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
+}
+
+static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->status.rule);
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+}
+
+static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop flow group, err=%d\n", err);
+ goto err_out;
+ }
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ rx->status_drop.group = g;
+ rx->status_drop.rule = rule;
+ rx->status_drop_cnt = flow_counter;
+
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ mlx5_destroy_flow_group(g);
+err_out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
+static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to add ipsec rx status pass rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ rx->status.rule = rule;
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ kvfree(spec);
+ return err;
+}
+
+void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ esw_ipsec_rx_status_pass_destroy(ipsec, rx);
+ esw_ipsec_rx_status_drop_destroy(ipsec, rx);
+}
+
+int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ int err;
+
+ err = esw_ipsec_rx_status_drop_create(ipsec, rx);
+ if (err)
+ return err;
+
+ err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
+ if (err)
+ goto err_pass_create;
+
+ return 0;
+
+err_pass_create:
+ esw_ipsec_rx_status_drop_destroy(ipsec, rx);
+ return err;
+}
+
+void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+{
+ attr->prio = FDB_CRYPTO_INGRESS;
+ attr->pol_level = MLX5_ESW_IPSEC_RX_POL_FT_LEVEL;
+ attr->sa_level = MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL;
+ attr->status_level = MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL;
+ attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
+}
+
+int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5_flow_destination *dest)
+{
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest->ft = mlx5_chains_get_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+
+ return 0;
+}
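
Design note on the status table: the drop group created by esw_ipsec_rx_status_drop_create() spans exactly one entry, the table's last FTE (start and end index are both ft->max_fte - 1), which makes it a catch-all with its own counter. The pass rule from esw_ipsec_rx_status_pass_create() matches ipsec_syndrome == 0, so only packets the hardware decrypted cleanly are counted and forwarded back to eswitch chain 0, prio 1 via mlx5_chains_get_table(); every non-zero syndrome falls through to the drop rule. The mlx5_chains_put_table() call in the pass-destroy path releases that table reference.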
new file mode 100644
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_ESW_IPSEC_FS_H__
+#define __MLX5_ESW_IPSEC_FS_H__
+
+struct mlx5e_ipsec;
+
+#ifdef CONFIG_MLX5_ESWITCH
+void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx);
+int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest);
+void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr);
+int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5_flow_destination *dest);
+#else
+static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx) {}
+
+static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ return -EINVAL;
+}
+
+static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr) {}
+
+static inline int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5_flow_destination *dest)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_MLX5_ESWITCH */
+#endif /* __MLX5_ESW_IPSEC_FS_H__ */
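
The CONFIG_MLX5_ESWITCH=n stubs let ipsec_fs.c compile unchanged: the create helpers return -EINVAL and the void helpers collapse to no-ops. These paths are only reached when rx == ipsec->rx_esw, which the driver does not set up without eswitch support, so the error returns are effectively unreachable and exist to keep the callers simple.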
@@ -2987,6 +2987,12 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (err)
goto out_err;

+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_CRYPTO_INGRESS, 3);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
err = create_fdb_fast_path(steering);
if (err)
goto out_err;
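
The priority is created with room for three levels, matching the three flow tables the eswitch RX path stacks inside it; as a cross-reference, the levels defined in the new esw/ipsec_fs.c (comments added here):

	enum {
		MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,     /* level 0: rx->ft.pol, policy table */
		MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,     /* level 1: rx->ft.sa, SA table */
		MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL, /* level 2: rx->ft.status, status table */
	};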
@@ -109,6 +109,7 @@ enum mlx5_flow_namespace_type {

enum {
FDB_BYPASS_PATH,
+ FDB_CRYPTO_INGRESS,
FDB_TC_OFFLOAD,
FDB_FT_OFFLOAD,
FDB_TC_MISS,
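
Since FDB priorities are instantiated in this enum's order, placing FDB_CRYPTO_INGRESS after FDB_BYPASS_PATH but before FDB_TC_OFFLOAD means ESP packets are decrypted before they reach the TC tables, and the status-pass rule can hand the clear-text packet to chain 0 for normal switching.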