From patchwork Thu Sep 21 12:10:27 2023
X-Patchwork-Submitter: Leon Romanovsky
X-Patchwork-Id: 13394138
From: Leon Romanovsky
To: Jason Gunthorpe
Cc: Patrisious Haddad, Eric Dumazet, Jakub Kicinski, linux-rdma@vger.kernel.org, Mark Bloch, netdev@vger.kernel.org, Paolo Abeni, Saeed Mahameed, Steffen Klassert, Simon Horman
Subject: [PATCH mlx5-next 1/9] RDMA/mlx5: Send events from IB driver about device affiliation state
Date: Thu, 21 Sep 2023 15:10:27 +0300

From: Patrisious Haddad

Send blocking events from the IB driver whenever a device completes
affiliation or is removed from an affiliation. This is useful because
the EN driver can now register for these events and know when a device
is affiliated or not.
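For illustration, a minimal sketch (not part of the patch) of how a
consumer could subscribe to these events. The handler name is
hypothetical; the event codes below and the
mlx5_blocking_notifier_register()/mlx5_blocking_notifier_call_chain()
APIs are the existing mlx5 ones:

static int affiliation_event_handler(struct notifier_block *nb,
				     unsigned long event, void *data)
{
	switch (event) {
	case MLX5_DRIVER_EVENT_AFFILIATION_DONE:
		/* data points to a u64 key: the affiliated IB device index */
		return NOTIFY_OK;
	case MLX5_DRIVER_EVENT_AFFILIATION_REMOVED:
		/* data is NULL on removal */
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

A listener registers with mlx5_blocking_notifier_register(mdev, &nb)
and is then invoked by the mlx5_core_mp_event_replay() helper added
below.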
Signed-off-by: Patrisious Haddad Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky --- drivers/infiniband/hw/mlx5/main.c | 17 +++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/main.c | 6 ++++++ include/linux/mlx5/device.h | 2 ++ include/linux/mlx5/driver.h | 2 ++ 4 files changed, 27 insertions(+) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index aed5cdea50e6..530d88784e41 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -3175,6 +3176,13 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, lockdep_assert_held(&mlx5_ib_multiport_mutex); + mlx5_core_mp_event_replay(ibdev->mdev, + MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, + NULL); + mlx5_core_mp_event_replay(mpi->mdev, + MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, + NULL); + mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); spin_lock(&port->mp.mpi_lock); @@ -3226,6 +3234,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, struct mlx5_ib_multiport_info *mpi) { u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; + u64 key; int err; lockdep_assert_held(&mlx5_ib_multiport_mutex); @@ -3254,6 +3263,14 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, mlx5_ib_init_cong_debugfs(ibdev, port_num); + key = ibdev->ib_dev.index; + mlx5_core_mp_event_replay(mpi->mdev, + MLX5_DRIVER_EVENT_AFFILIATION_DONE, + &key); + mlx5_core_mp_event_replay(ibdev->mdev, + MLX5_DRIVER_EVENT_AFFILIATION_DONE, + &key); + return true; unbind: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d17c9c31b165..307ffe6300f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -361,6 +361,12 @@ void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay); +void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data) +{ + mlx5_blocking_notifier_call_chain(dev, event, data); +} +EXPORT_SYMBOL(mlx5_core_mp_event_replay); + int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, enum mlx5_cap_mode cap_mode) { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8fbe22de16ef..820bca965fb6 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -367,6 +367,8 @@ enum mlx5_driver_event { MLX5_DRIVER_EVENT_MACSEC_SA_ADDED, MLX5_DRIVER_EVENT_MACSEC_SA_DELETED, MLX5_DRIVER_EVENT_SF_PEER_DEVLINK, + MLX5_DRIVER_EVENT_AFFILIATION_DONE, + MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, }; enum { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 92434814c855..52e982bc0f50 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1029,6 +1029,8 @@ bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev); void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev); +void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data); + void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); From patchwork Thu Sep 21 12:10:28 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Leon Romanovsky 
X-Patchwork-Id: 13394136
From: Leon Romanovsky
To: Jason Gunthorpe
Cc: Patrisious Haddad, Eric Dumazet, Jakub Kicinski, linux-rdma@vger.kernel.org, Mark Bloch, netdev@vger.kernel.org, Paolo Abeni, Saeed Mahameed, Steffen Klassert, Simon Horman
Subject: [PATCH mlx5-next 2/9] net/mlx5: Register mlx5e priv to devcom in MPV mode
Date: Thu, 21 Sep 2023 15:10:28 +0300
Message-ID: <279adfa0aa3a1957a339086f2c1739a50b8e4b68.1695296682.git.leon@kernel.org>

From: Patrisious Haddad

If the device is in MPV mode, the Ethernet driver now registers for
events from the IB driver about core device affiliation and
de-affiliation. The key provided in the event is used to connect each
mlx5e priv instance to its master counterpart, so the Ethernet driver
knows which core device is its master and, beyond that, whether the
partner device has IPsec configured.
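A sketch of the pairing handshake this enables, reconstructed from the
hunks below:

/*
 * IB driver                    master mlx5e                slave mlx5e
 * ---------                    ------------                -----------
 * AFFILIATION_DONE(key) -----> register MLX5_DEVCOM_MPV
 *                              with *key
 * AFFILIATION_DONE(key) ----------------------------------> register MLX5_DEVCOM_MPV
 *                                                           with *key
 *                              send MPV_DEVCOM_MASTER_UP -> mlx5e_devcom_event_mpv()
 *                                                           marks the component ready
 */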
Signed-off-by: Patrisious Haddad Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + .../mellanox/mlx5/core/en_accel/ipsec.h | 2 + .../net/ethernet/mellanox/mlx5/core/en_main.c | 39 +++++++++++++++++++ .../ethernet/mellanox/mlx5/core/lib/devcom.h | 1 + 4 files changed, 43 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 86f2690c5e01..44785a209466 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -936,6 +936,7 @@ struct mlx5e_priv { struct mlx5e_htb *htb; struct mlx5e_mqprio_rl *mqprio_rl; struct dentry *dfs_root; + struct mlx5_devcom_comp_dev *devcom; }; struct mlx5e_dev { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 9e7c42c2f77b..06743156ffca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -42,6 +42,8 @@ #define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L +#define MPV_DEVCOM_MASTER_UP 1 + struct aes_gcm_keymat { u64 seq_iv; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a2ae791538ed..f8054da590c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -69,6 +69,7 @@ #include "en/htb.h" #include "qos.h" #include "en/trap.h" +#include "lib/devcom.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) @@ -178,6 +179,37 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv) mlx5_notifier_unregister(priv->mdev, &priv->events_nb); } +static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data) +{ + struct mlx5e_priv *slave_priv = my_data; + + mlx5_devcom_comp_set_ready(slave_priv->devcom, true); + + return 0; +} + +static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data) +{ + priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc, + MLX5_DEVCOM_MPV, + *data, + mlx5e_devcom_event_mpv, + priv); + if (IS_ERR_OR_NULL(priv->devcom)) + return -EOPNOTSUPP; + + if (mlx5_core_is_mp_master(priv->mdev)) + mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP, + MPV_DEVCOM_MASTER_UP, priv); + + return 0; +} + +static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv) +{ + mlx5_devcom_unregister_component(priv->devcom); +} + static int blocking_event(struct notifier_block *nb, unsigned long event, void *data) { struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb); @@ -192,6 +224,13 @@ static int blocking_event(struct notifier_block *nb, unsigned long event, void * return NOTIFY_BAD; } break; + case MLX5_DRIVER_EVENT_AFFILIATION_DONE: + if (mlx5e_devcom_init_mpv(priv, data)) + return NOTIFY_BAD; + break; + case MLX5_DRIVER_EVENT_AFFILIATION_REMOVED: + mlx5e_devcom_cleanup_mpv(priv); + break; default: return NOTIFY_DONE; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index 8389ac0af708..8220d180e33c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -8,6 +8,7 @@ enum mlx5_devcom_component { MLX5_DEVCOM_ESW_OFFLOADS, + MLX5_DEVCOM_MPV, MLX5_DEVCOM_NUM_COMPONENTS, }; 
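Once both sides are registered with the same key, either driver can
reach its peer's private data through the component. A minimal sketch
of the lookup pattern (the same devcom calls the later IPsec patches in
this series use):

struct mlx5_devcom_comp_dev *pos = NULL;
struct mlx5e_priv *peer_priv;

if (!mlx5_devcom_for_each_peer_begin(priv->devcom))
	return -EOPNOTSUPP;

peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &pos);
if (peer_priv) {
	/* peer_priv->mdev is the affiliated core device */
}

mlx5_devcom_for_each_peer_end(priv->devcom);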
From patchwork Thu Sep 21 12:10:29 2023
X-Patchwork-Submitter: Leon Romanovsky
X-Patchwork-Id: 13394050
From: Leon Romanovsky
To: Jason Gunthorpe
Cc: Patrisious Haddad, Eric Dumazet, Jakub Kicinski, linux-rdma@vger.kernel.org, Mark Bloch, netdev@vger.kernel.org, Paolo Abeni, Saeed Mahameed, Steffen Klassert, Simon Horman
Subject: [PATCH mlx5-next 3/9] net/mlx5: Store devcom pointer inside IPsec RoCE
Date: Thu, 21 Sep 2023 15:10:29 +0300
Message-ID: <5bb3160ceeb07523542302886da54c78eef0d2af.1695296682.git.leon@kernel.org>

From: Patrisious Haddad

Store the mlx5e priv devcom component within IPsec RoCE to enable the
IPsec RoCE code to access the other device's private information. This
includes retrieving the necessary device information and the IPsec
database, which helps determine whether IPsec is configured.
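Note the double indirection in the hunks below: the RoCE code stores a
struct mlx5_devcom_comp_dev **, i.e. the address of the mlx5e priv's
devcom slot rather than its current value, so it always observes
whatever component the priv holds at the time of use. A condensed
sketch (collapsing the mlx5e_accel_ipsec_fs_init() pass-through):

ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, &priv->devcom);
/* ...later, inside the RoCE IPsec code... */
if (mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom)) {
	/* peer is affiliated and its component is ready */
	mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
}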
Signed-off-by: Patrisious Haddad Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 5 +++-- drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c | 6 +++++- drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h | 5 ++++- 5 files changed, 15 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 7d4ceb9b9c16..49354bc036ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -870,7 +870,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv) } ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv); - ret = mlx5e_accel_ipsec_fs_init(ipsec); + ret = mlx5e_accel_ipsec_fs_init(ipsec, &priv->devcom); if (ret) goto err_fs_init; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 06743156ffca..dc8e539dc20e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -38,6 +38,7 @@ #include #include #include "lib/aso.h" +#include "lib/devcom.h" #define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L @@ -304,7 +305,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv); void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv); void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec); -int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec); +int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec, struct mlx5_devcom_comp_dev **devcom); int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry); void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry); int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 7dba4221993f..4315e4f3d6cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -1888,7 +1888,8 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec) } } -int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) +int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec, + struct mlx5_devcom_comp_dev **devcom) { struct mlx5_core_dev *mdev = ipsec->mdev; struct mlx5_flow_namespace *ns, *ns_esw; @@ -1940,7 +1941,7 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec) ipsec->tx_esw->ns = ns_esw; xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1); } else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) { - ipsec->roce = mlx5_ipsec_fs_roce_init(mdev); + ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom); } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c index 6e3f178d6f84..6176680c470c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c @@ -31,6 +31,7 @@ struct mlx5_ipsec_fs { struct mlx5_ipsec_rx_roce ipv4_rx; struct mlx5_ipsec_rx_roce ipv6_rx; struct mlx5_ipsec_tx_roce tx; + struct mlx5_devcom_comp_dev **devcom; }; static void 
ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec, @@ -337,7 +338,8 @@ void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce) kfree(ipsec_roce); } -struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev) +struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev, + struct mlx5_devcom_comp_dev **devcom) { struct mlx5_ipsec_fs *roce_ipsec; struct mlx5_flow_namespace *ns; @@ -363,6 +365,8 @@ struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev) roce_ipsec->tx.ns = ns; + roce_ipsec->devcom = devcom; + return roce_ipsec; err_tx: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h index 9712d705fe48..75475e126181 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h @@ -4,6 +4,8 @@ #ifndef __MLX5_LIB_IPSEC_H__ #define __MLX5_LIB_IPSEC_H__ +#include "lib/devcom.h" + struct mlx5_ipsec_fs; struct mlx5_flow_table * @@ -20,6 +22,7 @@ int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_table *pol_ft); void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce); -struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev); +struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev, + struct mlx5_devcom_comp_dev **devcom); #endif /* __MLX5_LIB_IPSEC_H__ */
From patchwork Thu Sep 21 12:10:30 2023
X-Patchwork-Submitter: Leon Romanovsky
X-Patchwork-Id: 13394051
From: Leon Romanovsky
To: Jason Gunthorpe
Cc: Patrisious Haddad, Eric Dumazet, Jakub Kicinski, linux-rdma@vger.kernel.org, Mark Bloch, netdev@vger.kernel.org, Paolo Abeni, Saeed Mahameed, Steffen Klassert, Simon Horman
Subject: [PATCH mlx5-next 4/9] net/mlx5: Add alias flow table bits
Date: Thu, 21 Sep 2023 15:10:30 +0300
Message-ID:
<544c030f2a78c4adf3fe6b64f97a39cc1bbdabb9.1695296682.git.leon@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Patrisious Haddad Add all the capabilities needed to check for alias object support. As well as all the fields or commands needed for its creation and the creation of flow table that is able to jump to an alias object. Signed-off-by: Patrisious Haddad Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky --- include/linux/mlx5/mlx5_ifc.h | 56 ++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index dd8421d021cf..2b5ae4192de4 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -312,6 +312,7 @@ enum { MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d, MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e, MLX5_CMD_OP_SYNC_CRYPTO = 0xb12, + MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16, MLX5_CMD_OP_MAX }; @@ -1934,6 +1935,14 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 match_definer_format_supported[0x40]; }; +enum { + MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS = 0x80000, +}; + +enum { + MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE = 0x200, +}; + struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_0[0x80]; @@ -1950,7 +1959,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 migration_tracking_state[0x1]; u8 reserved_at_ca[0x16]; - u8 reserved_at_e0[0xc0]; + u8 cross_vhca_object_to_object_supported[0x20]; + + u8 allowed_object_for_other_vhca_access[0x40]; + + u8 reserved_at_140[0x60]; u8 flow_table_type_2_type[0x8]; u8 reserved_at_1a8[0x3]; @@ -6369,6 +6382,28 @@ struct mlx5_ifc_general_obj_out_cmd_hdr_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_allow_other_vhca_access_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + u8 reserved_at_40[0x50]; + u8 object_type_to_be_accessed[0x10]; + u8 object_id_to_be_accessed[0x20]; + u8 reserved_at_c0[0x40]; + union { + u8 access_key_raw[0x100]; + u8 access_key[8][0x20]; + }; +}; + +struct mlx5_ifc_allow_other_vhca_access_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + u8 syndrome[0x20]; + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_modify_header_arg_bits { u8 reserved_at_0[0x80]; @@ -6391,6 +6426,24 @@ struct mlx5_ifc_create_match_definer_out_bits { struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; }; +struct mlx5_ifc_alias_context_bits { + u8 vhca_id_to_be_accessed[0x10]; + u8 reserved_at_10[0xd]; + u8 status[0x3]; + u8 object_id_to_be_accessed[0x20]; + u8 reserved_at_40[0x40]; + union { + u8 access_key_raw[0x100]; + u8 access_key[8][0x20]; + }; + u8 metadata[0x80]; +}; + +struct mlx5_ifc_create_alias_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_alias_context_bits alias_ctx; +}; + enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, @@ -11919,6 +11972,7 @@ enum { MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24, MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27, MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15, }; enum { From patchwork Thu Sep 21 12:10:31 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Leon Romanovsky X-Patchwork-Id: 13394137 Return-Path: X-Spam-Checker-Version: 
From: Leon Romanovsky
To: Jason Gunthorpe
Cc: Patrisious Haddad, Eric Dumazet, Jakub Kicinski, linux-rdma@vger.kernel.org, Mark Bloch, netdev@vger.kernel.org, Paolo Abeni, Saeed Mahameed, Steffen Klassert, Simon Horman
Subject: [PATCH mlx5-next 5/9] net/mlx5: Implement alias object allow and create functions
Date: Thu, 21 Sep 2023 15:10:31 +0300

From: Patrisious Haddad

Add functions that allow one vhca to access another vhca's object, and
functions that create or destroy an alias object. Together they can be
used to create a cross-vhca flow table that is able to jump from the
steering domain managed by one vport to the steering domain on a
different vport.
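A condensed sketch of how the two helpers are meant to be paired
(mirroring their use later in the series; owner_mdev, peer_mdev,
owner_obj_id and key are illustrative names):

struct mlx5_cmd_allow_other_vhca_access_attr allow = {
	.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS,
	.obj_id = owner_obj_id,
};
struct mlx5_cmd_alias_obj_create_attr alias = {
	.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS,
	.obj_id = owner_obj_id,
	.vhca_id = MLX5_CAP_GEN(owner_mdev, vhca_id),
};
u32 alias_obj_id;
int err;

/* the owner grants access to its object under a shared random key */
memcpy(allow.access_key, key, ACCESS_KEY_LEN);
err = mlx5_cmd_allow_other_vhca_access(owner_mdev, &allow);

/* the peer creates an alias referring to <owner vhca_id, obj_id, key> */
memcpy(alias.access_key, key, ACCESS_KEY_LEN);
if (!err)
	err = mlx5_cmd_alias_obj_create(peer_mdev, &alias, &alias_obj_id);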
Signed-off-by: Patrisious Haddad Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 70 +++++++++++++++++++ .../ethernet/mellanox/mlx5/core/mlx5_core.h | 22 ++++++ 2 files changed, 92 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index afb348579577..7671c5727ed1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -525,6 +525,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_SAVE_VHCA_STATE: case MLX5_CMD_OP_LOAD_VHCA_STATE: case MLX5_CMD_OP_SYNC_CRYPTO: + case MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -ENOLINK; @@ -728,6 +729,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE); MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE); MLX5_COMMAND_STR_CASE(SYNC_CRYPTO); + MLX5_COMMAND_STR_CASE(ALLOW_OTHER_VHCA_ACCESS); default: return "unknown command opcode"; } } @@ -2090,6 +2092,74 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, } EXPORT_SYMBOL(mlx5_cmd_exec_cb); +int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev, + struct mlx5_cmd_allow_other_vhca_access_attr *attr) +{ + u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {}; + u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {}; + void *key; + + MLX5_SET(allow_other_vhca_access_in, + in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS); + MLX5_SET(allow_other_vhca_access_in, + in, object_type_to_be_accessed, attr->obj_type); + MLX5_SET(allow_other_vhca_access_in, + in, object_id_to_be_accessed, attr->obj_id); + + key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key); + memcpy(key, attr->access_key, sizeof(attr->access_key)); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev, + struct mlx5_cmd_alias_obj_create_attr *alias_attr, + u32 *obj_id) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {}; + void *param; + void *attr; + void *key; + int ret; + + attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr); + MLX5_SET(general_obj_in_cmd_hdr, + attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, + attr, obj_type, alias_attr->obj_type); + param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param); + MLX5_SET(general_obj_create_param, param, alias_object, 1); + + attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx); + MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id); + MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id); + + key = MLX5_ADDR_OF(alias_context, attr, access_key); + memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key)); + + ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (ret) + return ret; + + *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + + return 0; +} + +int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, + u16 obj_type) +{ + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, 
sizeof(out)); +} + static void destroy_msg_cache(struct mlx5_core_dev *dev) { struct cmd_msg_cache *ch; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 124352459c23..19ffd1816474 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -97,6 +97,22 @@ do { \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +#define ACCESS_KEY_LEN 32 +#define FT_ID_FT_TYPE_OFFSET 24 + +struct mlx5_cmd_allow_other_vhca_access_attr { + u16 obj_type; + u32 obj_id; + u8 access_key[ACCESS_KEY_LEN]; +}; + +struct mlx5_cmd_alias_obj_create_attr { + u32 obj_id; + u16 vhca_id; + u16 obj_type; + u8 access_key[ACCESS_KEY_LEN]; +}; + static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...) { struct device *device = dev->device; @@ -343,6 +359,12 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev); bool mlx5_rdma_supported(struct mlx5_core_dev *dev); bool mlx5_vnet_supported(struct mlx5_core_dev *dev); bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev); +int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev, + struct mlx5_cmd_allow_other_vhca_access_attr *attr); +int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev, + struct mlx5_cmd_alias_obj_create_attr *alias_attr, + u32 *obj_id); +int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, u16 obj_type); static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev) {
From patchwork Thu Sep 21 12:10:32 2023
X-Patchwork-Submitter: Leon Romanovsky
X-Patchwork-Id: 13394049
From: Leon Romanovsky
To: Jason Gunthorpe
Cc: Patrisious Haddad, Eric Dumazet, Jakub Kicinski, linux-rdma@vger.kernel.org, Mark Bloch, netdev@vger.kernel.org, Paolo Abeni, Saeed Mahameed, Steffen Klassert, Simon Horman
Subject: [PATCH mlx5-next 6/9] net/mlx5: Add create alias flow table function to ipsec roce
Date: Thu,
21 Sep 2023 15:10:32 +0300 Message-ID: <36e15ef41586f2a9aacc65b935de18391eef5607.1695296682.git.leon@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Patrisious Haddad Implements functions which creates an alias flow table, and check if alias flow table creation is even supported, and if successful returns the created alias flow table object id. This function would be used in later patches to allow jumping from one vhca to another, in order to add support for MPV mode. Signed-off-by: Patrisious Haddad Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky --- .../mellanox/mlx5/core/lib/ipsec_fs_roce.c | 66 +++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c index 6176680c470c..648f3d7ab68b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c @@ -4,6 +4,7 @@ #include "fs_core.h" #include "lib/ipsec_fs_roce.h" #include "mlx5_core.h" +#include struct mlx5_ipsec_miss { struct mlx5_flow_group *group; @@ -44,6 +45,71 @@ static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec, MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport); } +static bool ipsec_fs_create_alias_supported(struct mlx5_core_dev *mdev, + struct mlx5_core_dev *master_mdev) +{ + u64 obj_allowed_m = MLX5_CAP_GEN_2_64(master_mdev, allowed_object_for_other_vhca_access); + u32 obj_supp_m = MLX5_CAP_GEN_2(master_mdev, cross_vhca_object_to_object_supported); + u64 obj_allowed = MLX5_CAP_GEN_2_64(mdev, allowed_object_for_other_vhca_access); + u32 obj_supp = MLX5_CAP_GEN_2(mdev, cross_vhca_object_to_object_supported); + + if (!(obj_supp & + MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS) || + !(obj_supp_m & + MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS)) + return false; + + if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE) || + !(obj_allowed_m & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE)) + return false; + + return true; +} + +static int ipsec_fs_create_aliased_ft(struct mlx5_core_dev *ibv_owner, + struct mlx5_core_dev *ibv_allowed, + struct mlx5_flow_table *ft, + u32 *obj_id) +{ + u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id; + u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(ibv_owner, vhca_id); + struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {}; + struct mlx5_cmd_alias_obj_create_attr alias_attr = {}; + char key[ACCESS_KEY_LEN]; + int ret; + int i; + + if (!ipsec_fs_create_alias_supported(ibv_owner, ibv_allowed)) + return -EOPNOTSUPP; + + for (i = 0; i < ACCESS_KEY_LEN; i++) + key[i] = get_random_u64() & 0xFF; + + memcpy(allow_attr.access_key, key, ACCESS_KEY_LEN); + allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS; + allow_attr.obj_id = aliased_object_id; + + ret = mlx5_cmd_allow_other_vhca_access(ibv_owner, &allow_attr); + if (ret) { + mlx5_core_err(ibv_owner, "Failed to allow other vhca access err=%d\n", + ret); + return ret; + } + + memcpy(alias_attr.access_key, key, ACCESS_KEY_LEN); + alias_attr.obj_id = aliased_object_id; + alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS; + alias_attr.vhca_id = vhca_id_to_be_accessed; + ret = mlx5_cmd_alias_obj_create(ibv_allowed, &alias_attr, obj_id); + if (ret) { + 
mlx5_core_err(ibv_allowed, "Failed to create alias object err=%d\n", + ret); + return ret; + } + + return 0; +} + static int ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev, struct mlx5_flow_destination *default_dst,
From patchwork Thu Sep 21 12:10:33 2023
X-Patchwork-Submitter: Leon Romanovsky
X-Patchwork-Id: 13394139
From: Leon Romanovsky
To: Jason Gunthorpe
Cc: Patrisious Haddad, Eric Dumazet, Jakub Kicinski, linux-rdma@vger.kernel.org, Mark Bloch, netdev@vger.kernel.org, Paolo Abeni, Saeed Mahameed, Steffen Klassert, Simon Horman
Subject: [PATCH mlx5-next 7/9] net/mlx5: Configure IPsec steering for egress RoCEv2 MPV traffic
Date: Thu, 21 Sep 2023 15:10:33 +0300
Message-ID: <7ca5cf1ac5c6979359b8726e97510574e2b3d44d.1695296682.git.leon@kernel.org>

From: Patrisious Haddad

Add steering tables/rules in the RDMA_TX master domain to forward all
traffic to the IPsec crypto table in the NIC domain. When the traffic
comes from the slave, however, it must first be sent to an alias table
to switch GVMI, and from there it can reach the crypto table of the
appropriate GVMI in the NIC domain.
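A sketch of the resulting egress path for slave-originated RoCEv2
traffic, reconstructed from the hunks below:

/*
 * master RDMA_TX IPsec table
 *   rule: match source_vhca_port == slave native port
 *       |
 *       v
 * master NIC-egress goto_alias_ft (UNMANAGED, next_ft = alias object)
 *       |
 *       v
 * alias flow table object -- switches GVMI to the slave
 *       |
 *       v
 * slave NIC-domain IPsec policy table (pol_ft)
 */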
Signed-off-by: Patrisious Haddad Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky --- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 2 +- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 4 +- .../mellanox/mlx5/core/lib/ipsec_fs_roce.c | 223 +++++++++++++++++- .../mellanox/mlx5/core/lib/ipsec_fs_roce.h | 3 +- 4 files changed, 225 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 4315e4f3d6cd..38848bb7bea1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -562,7 +562,7 @@ static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_ static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx, struct mlx5_ipsec_fs *roce) { - mlx5_ipsec_fs_roce_tx_destroy(roce); + mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev); if (tx->chains) { ipsec_chains_destroy(tx->chains); } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index a13b9c2bd144..fdb4885ae217 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -137,7 +137,7 @@ #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1) #define KERNEL_TX_IPSEC_NUM_PRIOS 1 -#define KERNEL_TX_IPSEC_NUM_LEVELS 3 +#define KERNEL_TX_IPSEC_NUM_LEVELS 4 #define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS) #define KERNEL_TX_MACSEC_NUM_PRIOS 1 @@ -288,7 +288,7 @@ enum { #define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS #define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1) -#define RDMA_TX_IPSEC_NUM_PRIOS 1 +#define RDMA_TX_IPSEC_NUM_PRIOS 2 #define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1 #define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c index 648f3d7ab68b..a82ccae7b614 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c @@ -2,6 +2,8 @@ /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ #include "fs_core.h" +#include "fs_cmd.h" +#include "en.h" #include "lib/ipsec_fs_roce.h" #include "mlx5_core.h" #include @@ -25,6 +27,8 @@ struct mlx5_ipsec_tx_roce { struct mlx5_flow_group *g; struct mlx5_flow_table *ft; struct mlx5_flow_handle *rule; + struct mlx5_flow_table *goto_alias_ft; + u32 alias_id; struct mlx5_flow_namespace *ns; }; @@ -187,9 +191,200 @@ static int ipsec_fs_roce_tx_rule_setup(struct mlx5_core_dev *mdev, return err; } -void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce) +static int ipsec_fs_roce_tx_mpv_rule_setup(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_tx_roce *roce, + struct mlx5_flow_table *pol_ft) { + struct mlx5_flow_destination dst = {}; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.source_vhca_port); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters.source_vhca_port, + MLX5_CAP_GEN(mdev, native_port_num)); + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dst.ft = roce->goto_alias_ft; + rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n", + err); + goto out; + } + roce->rule = rule; + + /* No need for miss rule, since on miss we go to next PRIO, in which + * if master is configured, he will catch the traffic to go to his + * encryption table. + */ + +out: + kvfree(spec); + return err; +} + +#define MLX5_TX_ROCE_GROUP_SIZE BIT(0) +#define MLX5_IPSEC_RDMA_TX_FT_LEVEL 0 +#define MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL 3 /* Since last used level in NIC ipsec is 2 */ + +static int ipsec_fs_roce_tx_mpv_create_ft(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_tx_roce *roce, + struct mlx5_flow_table *pol_ft, + struct mlx5e_priv *peer_priv) +{ + struct mlx5_flow_namespace *roce_ns, *nic_ns; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table next_ft; + struct mlx5_flow_table *ft; + int err; + + roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC); + if (!roce_ns) + return -EOPNOTSUPP; + + nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC); + if (!nic_ns) + return -EOPNOTSUPP; + + err = ipsec_fs_create_aliased_ft(mdev, peer_priv->mdev, pol_ft, &roce->alias_id); + if (err) + return err; + + next_ft.id = roce->alias_id; + ft_attr.max_fte = 1; + ft_attr.next_ft = &next_ft; + ft_attr.level = MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL; + ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; + ft = mlx5_create_flow_table(nic_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec goto alias ft err=%d\n", err); + goto destroy_alias; + } + + roce->goto_alias_ft = ft; + + memset(&ft_attr, 0, sizeof(ft_attr)); + ft_attr.max_fte = 1; + ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL; + ft = mlx5_create_flow_table(roce_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err); + goto destroy_alias_ft; + } + + roce->ft = ft; + + return 0; + +destroy_alias_ft: + mlx5_destroy_flow_table(roce->goto_alias_ft); +destroy_alias: + mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id, + 
MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); + return err; +} + +static int ipsec_fs_roce_tx_mpv_create_group_rules(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_tx_roce *roce, + struct mlx5_flow_table *pol_ft, + u32 *in) +{ + struct mlx5_flow_group *g; + int ix = 0; + int err; + u8 *mc; + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters.source_vhca_port); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS); + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_TX_ROCE_GROUP_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + g = mlx5_create_flow_group(roce->ft, in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err); + return err; + } + roce->g = g; + + err = ipsec_fs_roce_tx_mpv_rule_setup(mdev, roce, pol_ft); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err); + goto destroy_group; + } + + return 0; + +destroy_group: + mlx5_destroy_flow_group(roce->g); + return err; +} + +static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_flow_table *pol_ft, + u32 *in) +{ + struct mlx5_devcom_comp_dev *tmp = NULL; + struct mlx5_ipsec_tx_roce *roce; + struct mlx5e_priv *peer_priv; + int err; + + if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom)) + return -EOPNOTSUPP; + + peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp); + if (!peer_priv) { + err = -EOPNOTSUPP; + goto release_peer; + } + + roce = &ipsec_roce->tx; + + err = ipsec_fs_roce_tx_mpv_create_ft(mdev, roce, pol_ft, peer_priv); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec tables err=%d\n", err); + goto release_peer; + } + + err = ipsec_fs_roce_tx_mpv_create_group_rules(mdev, roce, pol_ft, in); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group/rule err=%d\n", err); + goto destroy_tables; + } + + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return 0; + +destroy_tables: + mlx5_destroy_flow_table(roce->ft); + mlx5_destroy_flow_table(roce->goto_alias_ft); + mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); +release_peer: + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return err; +} + +void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_core_dev *mdev) +{ + struct mlx5_devcom_comp_dev *tmp = NULL; struct mlx5_ipsec_tx_roce *tx_roce; + struct mlx5e_priv *peer_priv; if (!ipsec_roce) return; @@ -199,9 +394,24 @@ void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce) mlx5_del_flow_rules(tx_roce->rule); mlx5_destroy_flow_group(tx_roce->g); mlx5_destroy_flow_table(tx_roce->ft); -} -#define MLX5_TX_ROCE_GROUP_SIZE BIT(0) + if (!mlx5_core_is_mp_slave(mdev)) + return; + + if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom)) + return; + + peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp); + if (!peer_priv) { + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return; + } + + mlx5_destroy_flow_table(tx_roce->goto_alias_ft); + mlx5_cmd_alias_obj_destroy(peer_priv->mdev, tx_roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); +} int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, @@ -224,7 +434,14 @@ int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, if (!in) return -ENOMEM; + if 
(mlx5_core_is_mp_slave(mdev)) { + err = ipsec_fs_roce_tx_mpv_create(mdev, ipsec_roce, pol_ft, in); + goto free_in; + } + ft_attr.max_fte = 1; + ft_attr.prio = 1; + ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL; ft = mlx5_create_flow_table(roce->ns, &ft_attr); if (IS_ERR(ft)) { err = PTR_ERR(ft); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h index 75475e126181..ad120caf269e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h @@ -17,7 +17,8 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, struct mlx5_flow_namespace *ns, struct mlx5_flow_destination *default_dst, u32 family, u32 level, u32 prio); -void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce); +void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_core_dev *mdev); int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_table *pol_ft);
From patchwork Thu Sep 21 12:10:34 2023
X-Patchwork-Submitter: Leon Romanovsky
X-Patchwork-Id: 13394038
From: Leon Romanovsky
To: Jason Gunthorpe
Cc: Patrisious Haddad, Eric Dumazet, Jakub Kicinski, linux-rdma@vger.kernel.org, Mark Bloch, netdev@vger.kernel.org, Paolo Abeni, Saeed Mahameed, Steffen Klassert, Simon Horman
Subject: [PATCH mlx5-next 8/9] net/mlx5: Configure IPsec steering for ingress RoCEv2 MPV traffic
Date: Thu, 21 Sep 2023 15:10:34 +0300

From: Patrisious Haddad

Add an empty flow table in the RDMA_RX master domain and forward all
received traffic to it, so that it continues through the FW RoCE
steering. To achieve that, we first check whether the decrypted traffic
is RoCEv2 and, if so, forward it to the RDMA_RX domain.
But in case the traffic is coming from the slave, have to first send the traffic to an alias table in order to switch gvmi and from there we can go to the appropriate gvmi flow table in RDMA_RX master domain. Signed-off-by: Patrisious Haddad Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky --- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 4 +- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 6 +- .../mellanox/mlx5/core/lib/ipsec_fs_roce.c | 216 ++++++++++++++++-- .../mellanox/mlx5/core/lib/ipsec_fs_roce.h | 2 +- 4 files changed, 205 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 38848bb7bea1..86f1542b3ab7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -264,7 +264,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, } mlx5_destroy_flow_table(rx->ft.status); - mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family); + mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev); } static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, @@ -422,7 +422,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, err_add: mlx5_destroy_flow_table(rx->ft.status); err_fs_ft_status: - mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family); + mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index fdb4885ae217..e6bfa7e4f146 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -114,9 +114,9 @@ #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) /* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, - * IPsec RoCE policy + * {IPsec RoCE MPV,Alias table},IPsec RoCE policy */ -#define KERNEL_NIC_PRIO_NUM_LEVELS 9 +#define KERNEL_NIC_PRIO_NUM_LEVELS 11 #define KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc */ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) @@ -231,7 +231,7 @@ enum { }; #define RDMA_RX_IPSEC_NUM_PRIOS 1 -#define RDMA_RX_IPSEC_NUM_LEVELS 2 +#define RDMA_RX_IPSEC_NUM_LEVELS 4 #define RDMA_RX_IPSEC_MIN_LEVEL (RDMA_RX_IPSEC_NUM_LEVELS) #define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c index a82ccae7b614..cce2193608cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c @@ -18,6 +18,11 @@ struct mlx5_ipsec_rx_roce { struct mlx5_flow_table *ft; struct mlx5_flow_handle *rule; struct mlx5_ipsec_miss roce_miss; + struct mlx5_flow_table *nic_master_ft; + struct mlx5_flow_group *nic_master_group; + struct mlx5_flow_handle *nic_master_rule; + struct mlx5_flow_table *goto_alias_ft; + u32 alias_id; struct mlx5_flow_table *ft_rdma; struct mlx5_flow_namespace *ns_rdma; @@ -119,6 +124,7 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev, struct mlx5_flow_destination *default_dst, struct mlx5_ipsec_rx_roce *roce) { + bool is_mpv_slave = mlx5_core_is_mp_slave(mdev); struct mlx5_flow_destination dst = {}; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_handle *rule; @@ -132,14 +138,19 @@ 
ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev, ipsec_fs_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; - dst.ft = roce->ft_rdma; + if (is_mpv_slave) { + dst.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dst.ft = roce->goto_alias_ft; + } else { + dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dst.ft = roce->ft_rdma; + } rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule err=%d\n", err); - goto fail_add_rule; + goto out; } roce->rule = rule; @@ -155,12 +166,30 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev, roce->roce_miss.rule = rule; + if (!is_mpv_slave) + goto out; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE; + dst.ft = roce->ft_rdma; + rule = mlx5_add_flow_rules(roce->nic_master_ft, NULL, &flow_act, &dst, + 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule for alias err=%d\n", + err); + goto fail_add_nic_master_rule; + } + roce->nic_master_rule = rule; + kvfree(spec); return 0; +fail_add_nic_master_rule: + mlx5_del_flow_rules(roce->roce_miss.rule); fail_add_default_rule: mlx5_del_flow_rules(roce->rule); -fail_add_rule: +out: kvfree(spec); return err; } @@ -379,6 +408,141 @@ static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev, return err; } +static void roce_rx_mpv_destroy_tables(struct mlx5_core_dev *mdev, struct mlx5_ipsec_rx_roce *roce) +{ + mlx5_destroy_flow_table(roce->goto_alias_ft); + mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); + mlx5_destroy_flow_group(roce->nic_master_group); + mlx5_destroy_flow_table(roce->nic_master_ft); +} + +#define MLX5_RX_ROCE_GROUP_SIZE BIT(0) +#define MLX5_IPSEC_RX_IPV4_FT_LEVEL 3 +#define MLX5_IPSEC_RX_IPV6_FT_LEVEL 2 + +static int ipsec_fs_roce_rx_mpv_create(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_fs *ipsec_roce, + struct mlx5_flow_namespace *ns, + u32 family, u32 level, u32 prio) +{ + struct mlx5_flow_namespace *roce_ns, *nic_ns; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_devcom_comp_dev *tmp = NULL; + struct mlx5_ipsec_rx_roce *roce; + struct mlx5_flow_table next_ft; + struct mlx5_flow_table *ft; + struct mlx5_flow_group *g; + struct mlx5e_priv *peer_priv; + int ix = 0; + u32 *in; + int err; + + roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx : + &ipsec_roce->ipv6_rx; + + if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom)) + return -EOPNOTSUPP; + + peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp); + if (!peer_priv) { + err = -EOPNOTSUPP; + goto release_peer; + } + + roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC); + if (!roce_ns) { + err = -EOPNOTSUPP; + goto release_peer; + } + + nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); + if (!nic_ns) { + err = -EOPNOTSUPP; + goto release_peer; + } + + in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto release_peer; + } + + ft_attr.level = (family == AF_INET) ? 
MLX5_IPSEC_RX_IPV4_FT_LEVEL : + MLX5_IPSEC_RX_IPV6_FT_LEVEL; + ft_attr.max_fte = 1; + ft = mlx5_create_flow_table(roce_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma master err=%d\n", err); + goto free_in; + } + + roce->ft_rdma = ft; + + ft_attr.max_fte = 1; + ft_attr.prio = prio; + ft_attr.level = level + 2; + ft = mlx5_create_flow_table(nic_ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC master err=%d\n", err); + goto destroy_ft_rdma; + } + roce->nic_master_ft = ft; + + MLX5_SET_CFG(in, start_flow_index, ix); + ix += 1; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + g = mlx5_create_flow_group(roce->nic_master_ft, in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group aliased err=%d\n", err); + goto destroy_nic_master_ft; + } + roce->nic_master_group = g; + + err = ipsec_fs_create_aliased_ft(peer_priv->mdev, mdev, roce->nic_master_ft, + &roce->alias_id); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias FT err=%d\n", err); + goto destroy_group; + } + + next_ft.id = roce->alias_id; + ft_attr.max_fte = 1; + ft_attr.prio = prio; + ft_attr.level = roce->ft->level + 1; + ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED; + ft_attr.next_ft = &next_ft; + ft = mlx5_create_flow_table(ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC slave err=%d\n", err); + goto destroy_alias; + } + roce->goto_alias_ft = ft; + + kvfree(in); + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return 0; + +destroy_alias: + mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id, + MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); +destroy_group: + mlx5_destroy_flow_group(roce->nic_master_group); +destroy_nic_master_ft: + mlx5_destroy_flow_table(roce->nic_master_ft); +destroy_ft_rdma: + mlx5_destroy_flow_table(roce->ft_rdma); +free_in: + kvfree(in); +release_peer: + mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + return err; +} + void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_core_dev *mdev) { @@ -493,8 +657,10 @@ struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_ro return rx_roce->ft; } -void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family) +void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family, + struct mlx5_core_dev *mdev) { + bool is_mpv_slave = mlx5_core_is_mp_slave(mdev); struct mlx5_ipsec_rx_roce *rx_roce; if (!ipsec_roce) @@ -503,22 +669,25 @@ void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family) rx_roce = (family == AF_INET) ? 
&ipsec_roce->ipv4_rx : &ipsec_roce->ipv6_rx; + if (is_mpv_slave) + mlx5_del_flow_rules(rx_roce->nic_master_rule); mlx5_del_flow_rules(rx_roce->roce_miss.rule); mlx5_del_flow_rules(rx_roce->rule); + if (is_mpv_slave) + roce_rx_mpv_destroy_tables(mdev, rx_roce); mlx5_destroy_flow_table(rx_roce->ft_rdma); mlx5_destroy_flow_group(rx_roce->roce_miss.group); mlx5_destroy_flow_group(rx_roce->g); mlx5_destroy_flow_table(rx_roce->ft); } -#define MLX5_RX_ROCE_GROUP_SIZE BIT(0) - int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_namespace *ns, struct mlx5_flow_destination *default_dst, u32 family, u32 level, u32 prio) { + bool is_mpv_slave = mlx5_core_is_mp_slave(mdev); struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_ipsec_rx_roce *roce; struct mlx5_flow_table *ft; @@ -582,18 +751,28 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, } roce->roce_miss.group = g; - memset(&ft_attr, 0, sizeof(ft_attr)); - if (family == AF_INET) - ft_attr.level = 1; - ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr); - if (IS_ERR(ft)) { - err = PTR_ERR(ft); - mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err); - goto fail_rdma_table; + if (is_mpv_slave) { + err = ipsec_fs_roce_rx_mpv_create(mdev, ipsec_roce, ns, family, level, prio); + if (err) { + mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias err=%d\n", err); + goto fail_mpv_create; + } + } else { + memset(&ft_attr, 0, sizeof(ft_attr)); + if (family == AF_INET) + ft_attr.level = 1; + ft_attr.max_fte = 1; + ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, + "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err); + goto fail_rdma_table; + } + + roce->ft_rdma = ft; } - roce->ft_rdma = ft; - err = ipsec_fs_roce_rx_rule_setup(mdev, default_dst, roce); if (err) { mlx5_core_err(mdev, "Fail to create RoCE IPsec rx rules err=%d\n", err); @@ -604,7 +783,10 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, return 0; fail_setup_rule: + if (is_mpv_slave) + roce_rx_mpv_destroy_tables(mdev, roce); mlx5_destroy_flow_table(roce->ft_rdma); +fail_mpv_create: fail_rdma_table: mlx5_destroy_flow_group(roce->roce_miss.group); fail_mgroup: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h index ad120caf269e..435a480400e5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h @@ -11,7 +11,7 @@ struct mlx5_ipsec_fs; struct mlx5_flow_table * mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family); void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, - u32 family); + u32 family, struct mlx5_core_dev *mdev); int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_namespace *ns, From patchwork Thu Sep 21 12:10:35 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Leon Romanovsky X-Patchwork-Id: 13394048 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 0E902E71096 for ; Thu, 21 Sep 2023 17:01:03 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229770AbjIURBG 
(ORCPT ); Thu, 21 Sep 2023 13:01:06 -0400 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:33976 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229777AbjIURAr (ORCPT ); Thu, 21 Sep 2023 13:00:47 -0400 Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id A2FB41FC2 for ; Thu, 21 Sep 2023 09:59:38 -0700 (PDT) Received: by smtp.kernel.org (Postfix) with ESMTPSA id DCC4DC4E670; Thu, 21 Sep 2023 12:11:23 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1695298285; bh=hL5SyeCT7YzpIfTKOD8Vfi4NtP654EsGBZVUNH6YgtM=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=VlPwsJ0su1rb+AQA+P0OXW99GMKkOtw6q1F5gUqNQ4/4wUclOuJ4Xd4lvk6ztMS/3 y0MYMSxuHF3FaCckr9XAdDkSkC25MDem8UjD1lvsMugpshJAkfCX16cpaubkEqrXVc rcKy1y9+4QLqYXtLnlUROkq7EQ/ywmxh7I0yoihHm4kiQB6gF1J2nHFEz7Dr3gX6bX oatIOoj3whIjcUvx+wE6PYB/OQzg04QNIXgk9qnbrmTp178Iv+V+lYAeYhJya0rcjl 0vL7++12YbwNhqeOH15zXtq9CH53m8d3YKJKA5EhIEOUkrgMSYg7SVJ/GgTwxEvSVv AZl6/3+DDPXLA== From: Leon Romanovsky To: Jason Gunthorpe Cc: Patrisious Haddad , Eric Dumazet , Jakub Kicinski , linux-rdma@vger.kernel.org, Mark Bloch , netdev@vger.kernel.org, Paolo Abeni , Saeed Mahameed , Steffen Klassert , Simon Horman Subject: [PATCH mlx5-next 9/9] net/mlx5: Handle IPsec steering upon master unbind/bind Date: Thu, 21 Sep 2023 15:10:35 +0300 Message-ID: <8434e88912c588affe51b34669900382a132e873.1695296682.git.leon@kernel.org> X-Mailer: git-send-email 2.41.0 In-Reply-To: References: MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Patrisious Haddad When the master device is unbound, make sure to clean up all of the steering rules and flow tables that were created over the master, so that it can unbind properly and Ethernet traffic continues to work independently. Upon bringing the master device back up and attaching the slave to it, check whether the slave already has IPsec configured; if so, reconfigure the rules needed to support RoCE traffic. Note that while the master device is unbound, the user is unable to configure IPsec again, since the devices are then in an illegal state: they are in MPV mode, yet the slave has no master. However, if IPsec was configured beforehand, it continues to work for Ethernet traffic while the master is unbound, and works again for all traffic once the master is bound back.
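To make the sequencing concrete, here is a condensed sketch of the hand-off this patch adds (abridged from the mlx5e_ipsec_send_event() and ipsec_mpv_work_handler() hunks below; error handling dropped):

	/* Master side: publish the event over devcom, then block until the
	 * slave's workqueue has finished tearing down or re-creating its
	 * RoCE IPsec steering.
	 */
	void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
	{
		if (!priv->ipsec)
			return; /* IPsec not supported */

		/* Runs ipsec_mpv_work_handler() on the slave's workqueue */
		mlx5_devcom_send_event(priv->devcom, event, event, priv);
		wait_for_completion(&priv->ipsec->comp);
	}

The completion is what guarantees the master does not finish unbinding (or binding) while the slave is still touching flow tables that reference it.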
Signed-off-by: Patrisious Haddad Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 7 ++ .../mellanox/mlx5/core/en_accel/ipsec.c | 1 + .../mellanox/mlx5/core/en_accel/ipsec.h | 24 +++- .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 111 +++++++++++++++++- .../mlx5/core/en_accel/ipsec_offload.c | 3 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 28 ++++- .../mellanox/mlx5/core/lib/ipsec_fs_roce.c | 79 +++++++++---- .../mellanox/mlx5/core/lib/ipsec_fs_roce.h | 4 +- 8 files changed, 225 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 44785a209466..2b3254e33fe5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -168,6 +168,13 @@ struct page_pool; #define mlx5e_state_dereference(priv, p) \ rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock)) +enum mlx5e_devcom_events { + MPV_DEVCOM_MASTER_UP, + MPV_DEVCOM_MASTER_DOWN, + MPV_DEVCOM_IPSEC_MASTER_UP, + MPV_DEVCOM_IPSEC_MASTER_DOWN, +}; + static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev) { if (mlx5_lag_is_lacp_owner(mdev)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 49354bc036ad..62f9c19f1028 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -850,6 +850,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv) xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC); ipsec->mdev = priv->mdev; + init_completion(&ipsec->comp); ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0, priv->netdev->name); if (!ipsec->wq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index dc8e539dc20e..8f4a37bceaf4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -43,8 +43,6 @@ #define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L -#define MPV_DEVCOM_MASTER_UP 1 - struct aes_gcm_keymat { u64 seq_iv; @@ -224,12 +222,20 @@ struct mlx5e_ipsec_tx_create_attr { enum mlx5_flow_namespace_type chains_ns; }; +struct mlx5e_ipsec_mpv_work { + int event; + struct work_struct work; + struct mlx5e_priv *slave_priv; + struct mlx5e_priv *master_priv; +}; + struct mlx5e_ipsec { struct mlx5_core_dev *mdev; struct xarray sadb; struct mlx5e_ipsec_sw_stats sw_stats; struct mlx5e_ipsec_hw_stats hw_stats; struct workqueue_struct *wq; + struct completion comp; struct mlx5e_flow_steering *fs; struct mlx5e_ipsec_rx *rx_ipv4; struct mlx5e_ipsec_rx *rx_ipv6; @@ -241,6 +247,7 @@ struct mlx5e_ipsec { struct notifier_block netevent_nb; struct mlx5_ipsec_fs *roce; u8 is_uplink_rep: 1; + struct mlx5e_ipsec_mpv_work mpv_work; }; struct mlx5e_ipsec_esn_state { @@ -331,6 +338,10 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5_accel_esp_xfrm_attrs *attrs); +void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv, + struct mlx5e_priv *master_priv); +void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event); + static inline struct mlx5_core_dev * mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry) { @@ -366,6 +377,15 @@ static inline u32 mlx5_ipsec_device_caps(struct 
mlx5_core_dev *mdev) { return 0; } + +static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv, + struct mlx5e_priv *master_priv) +{ +} + +static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event) +{ +} #endif #endif /* __MLX5E_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 86f1542b3ab7..ef4dfc8442a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -229,6 +229,83 @@ static int ipsec_miss_create(struct mlx5_core_dev *mdev, return err; } +static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family) +{ + struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET); + struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false); + struct mlx5_flow_destination old_dest, new_dest; + + old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false), + family2tt(family)); + + mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family, + MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO); + + new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family); + new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest); + mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest); +} + +static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family) +{ + struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET); + struct mlx5_flow_destination old_dest, new_dest; + + old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family); + old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false), + family2tt(family)); + mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest); + mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest); + + mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev); +} + +static void ipsec_mpv_work_handler(struct work_struct *_work) +{ + struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work); + struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec; + + switch (work->event) { + case MPV_DEVCOM_IPSEC_MASTER_UP: + mutex_lock(&ipsec->tx->ft.mutex); + if (ipsec->tx->ft.refcnt) + mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol, + true); + mutex_unlock(&ipsec->tx->ft.mutex); + + mutex_lock(&ipsec->rx_ipv4->ft.mutex); + if (ipsec->rx_ipv4->ft.refcnt) + handle_ipsec_rx_bringup(ipsec, AF_INET); + mutex_unlock(&ipsec->rx_ipv4->ft.mutex); + + mutex_lock(&ipsec->rx_ipv6->ft.mutex); + if (ipsec->rx_ipv6->ft.refcnt) + handle_ipsec_rx_bringup(ipsec, AF_INET6); + mutex_unlock(&ipsec->rx_ipv6->ft.mutex); + break; + case MPV_DEVCOM_IPSEC_MASTER_DOWN: + mutex_lock(&ipsec->tx->ft.mutex); + if (ipsec->tx->ft.refcnt) + mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev); + mutex_unlock(&ipsec->tx->ft.mutex); + + mutex_lock(&ipsec->rx_ipv4->ft.mutex); + if (ipsec->rx_ipv4->ft.refcnt) + handle_ipsec_rx_cleanup(ipsec, AF_INET); + mutex_unlock(&ipsec->rx_ipv4->ft.mutex); + + mutex_lock(&ipsec->rx_ipv6->ft.mutex); + if (ipsec->rx_ipv6->ft.refcnt) + handle_ipsec_rx_cleanup(ipsec, AF_INET6); + mutex_unlock(&ipsec->rx_ipv6->ft.mutex); + break; + } + + complete(&work->master_priv->ipsec->comp); +} + static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec 
*ipsec, u32 family) { struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false); @@ -665,7 +742,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx, } connect_roce: - err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol); + err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false); if (err) goto err_roce; return 0; @@ -1942,6 +2019,8 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec, xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1); } else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) { ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom); + } else { + mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n"); + } return 0; @@ -1988,3 +2067,33 @@ bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry) return rx->allow_tunnel_mode; } + +void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv, + struct mlx5e_priv *master_priv) +{ + struct mlx5e_ipsec_mpv_work *work; + + reinit_completion(&master_priv->ipsec->comp); + + if (!slave_priv->ipsec) { + complete(&master_priv->ipsec->comp); + return; + } + + work = &slave_priv->ipsec->mpv_work; + + INIT_WORK(&work->work, ipsec_mpv_work_handler); + work->event = event; + work->slave_priv = slave_priv; + work->master_priv = master_priv; + queue_work(slave_priv->ipsec->wq, &work->work); +} + +void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event) +{ + if (!priv->ipsec) + return; /* IPsec not supported */ + + mlx5_devcom_send_event(priv->devcom, event, event, priv); + wait_for_completion(&priv->ipsec->comp); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index 3245d1c9d539..a91f772dc981 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -5,6 +5,7 @@ #include "en.h" #include "ipsec.h" #include "lib/crypto.h" +#include "lib/ipsec_fs_roce.h" enum { MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET, @@ -63,7 +64,7 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) caps |= MLX5_IPSEC_CAP_ESPINUDP; } - if (mlx5_get_roce_state(mdev) && + if (mlx5_get_roce_state(mdev) && mlx5_ipsec_fs_is_mpv_roce_supported(mdev) && MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA && MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX) caps |= MLX5_IPSEC_CAP_ROCE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f8054da590c9..44113ab82363 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -183,7 +183,20 @@ static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data) { struct mlx5e_priv *slave_priv = my_data; - mlx5_devcom_comp_set_ready(slave_priv->devcom, true); + switch (event) { + case MPV_DEVCOM_MASTER_UP: + mlx5_devcom_comp_set_ready(slave_priv->devcom, true); + break; + case MPV_DEVCOM_MASTER_DOWN: + /* No need to set comp ready to false since we unregister right + * after, and doing so hurts the cleanup flow.
+ */ + break; + case MPV_DEVCOM_IPSEC_MASTER_UP: + case MPV_DEVCOM_IPSEC_MASTER_DOWN: + mlx5e_ipsec_handle_mpv_event(event, my_data, event_data); + break; + } return 0; } @@ -198,15 +211,26 @@ static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data) if (IS_ERR_OR_NULL(priv->devcom)) return -EOPNOTSUPP; - if (mlx5_core_is_mp_master(priv->mdev)) + if (mlx5_core_is_mp_master(priv->mdev)) { mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP, MPV_DEVCOM_MASTER_UP, priv); + mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_UP); + } return 0; } static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv) { + if (IS_ERR_OR_NULL(priv->devcom)) + return; + + if (mlx5_core_is_mp_master(priv->mdev)) { + mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_DOWN, + MPV_DEVCOM_MASTER_DOWN, priv); + mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_DOWN); + } + mlx5_devcom_unregister_component(priv->devcom); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c index cce2193608cd..234cd00f71a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c @@ -23,6 +23,7 @@ struct mlx5_ipsec_rx_roce { struct mlx5_flow_handle *nic_master_rule; struct mlx5_flow_table *goto_alias_ft; u32 alias_id; + char key[ACCESS_KEY_LEN]; struct mlx5_flow_table *ft_rdma; struct mlx5_flow_namespace *ns_rdma; @@ -34,6 +35,7 @@ struct mlx5_ipsec_tx_roce { struct mlx5_flow_handle *rule; struct mlx5_flow_table *goto_alias_ft; u32 alias_id; + char key[ACCESS_KEY_LEN]; struct mlx5_flow_namespace *ns; }; @@ -54,37 +56,40 @@ static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec, MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport); } -static bool ipsec_fs_create_alias_supported(struct mlx5_core_dev *mdev, - struct mlx5_core_dev *master_mdev) +static bool ipsec_fs_create_alias_supported_one(struct mlx5_core_dev *mdev) { - u64 obj_allowed_m = MLX5_CAP_GEN_2_64(master_mdev, allowed_object_for_other_vhca_access); - u32 obj_supp_m = MLX5_CAP_GEN_2(master_mdev, cross_vhca_object_to_object_supported); u64 obj_allowed = MLX5_CAP_GEN_2_64(mdev, allowed_object_for_other_vhca_access); u32 obj_supp = MLX5_CAP_GEN_2(mdev, cross_vhca_object_to_object_supported); if (!(obj_supp & - MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS) || - !(obj_supp_m & MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS)) return false; - if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE) || - !(obj_allowed_m & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE)) + if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE)) return false; return true; } +static bool ipsec_fs_create_alias_supported(struct mlx5_core_dev *mdev, + struct mlx5_core_dev *master_mdev) +{ + if (ipsec_fs_create_alias_supported_one(mdev) && + ipsec_fs_create_alias_supported_one(master_mdev)) + return true; + + return false; +} + static int ipsec_fs_create_aliased_ft(struct mlx5_core_dev *ibv_owner, struct mlx5_core_dev *ibv_allowed, struct mlx5_flow_table *ft, - u32 *obj_id) + u32 *obj_id, char *alias_key, bool from_event) { u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id; u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(ibv_owner, vhca_id); struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {}; struct mlx5_cmd_alias_obj_create_attr alias_attr = {}; - char 
key[ACCESS_KEY_LEN]; int ret; int i; @@ -92,20 +97,23 @@ static int ipsec_fs_create_aliased_ft(struct mlx5_core_dev *ibv_owner, return -EOPNOTSUPP; for (i = 0; i < ACCESS_KEY_LEN; i++) - key[i] = get_random_u64() & 0xFF; + if (!from_event) + alias_key[i] = get_random_u64() & 0xFF; - memcpy(allow_attr.access_key, key, ACCESS_KEY_LEN); + memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN); allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS; allow_attr.obj_id = aliased_object_id; - ret = mlx5_cmd_allow_other_vhca_access(ibv_owner, &allow_attr); - if (ret) { - mlx5_core_err(ibv_owner, "Failed to allow other vhca access err=%d\n", - ret); - return ret; + if (!from_event) { + ret = mlx5_cmd_allow_other_vhca_access(ibv_owner, &allow_attr); + if (ret) { + mlx5_core_err(ibv_owner, "Failed to allow other vhca access err=%d\n", + ret); + return ret; + } } - memcpy(alias_attr.access_key, key, ACCESS_KEY_LEN); + memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN); alias_attr.obj_id = aliased_object_id; alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS; alias_attr.vhca_id = vhca_id_to_be_accessed; @@ -268,7 +276,8 @@ static int ipsec_fs_roce_tx_mpv_rule_setup(struct mlx5_core_dev *mdev, static int ipsec_fs_roce_tx_mpv_create_ft(struct mlx5_core_dev *mdev, struct mlx5_ipsec_tx_roce *roce, struct mlx5_flow_table *pol_ft, - struct mlx5e_priv *peer_priv) + struct mlx5e_priv *peer_priv, + bool from_event) { struct mlx5_flow_namespace *roce_ns, *nic_ns; struct mlx5_flow_table_attr ft_attr = {}; @@ -284,7 +293,8 @@ static int ipsec_fs_roce_tx_mpv_create_ft(struct mlx5_core_dev *mdev, if (!nic_ns) return -EOPNOTSUPP; - err = ipsec_fs_create_aliased_ft(mdev, peer_priv->mdev, pol_ft, &roce->alias_id); + err = ipsec_fs_create_aliased_ft(mdev, peer_priv->mdev, pol_ft, &roce->alias_id, roce->key, + from_event); if (err) return err; @@ -365,7 +375,7 @@ static int ipsec_fs_roce_tx_mpv_create_group_rules(struct mlx5_core_dev *mdev, static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_flow_table *pol_ft, - u32 *in) + u32 *in, bool from_event) { struct mlx5_devcom_comp_dev *tmp = NULL; struct mlx5_ipsec_tx_roce *roce; @@ -383,7 +393,7 @@ static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev, roce = &ipsec_roce->tx; - err = ipsec_fs_roce_tx_mpv_create_ft(mdev, roce, pol_ft, peer_priv); + err = ipsec_fs_roce_tx_mpv_create_ft(mdev, roce, pol_ft, peer_priv, from_event); if (err) { mlx5_core_err(mdev, "Fail to create RoCE IPsec tables err=%d\n", err); goto release_peer; @@ -503,7 +513,7 @@ static int ipsec_fs_roce_rx_mpv_create(struct mlx5_core_dev *mdev, roce->nic_master_group = g; err = ipsec_fs_create_aliased_ft(peer_priv->mdev, mdev, roce->nic_master_ft, - &roce->alias_id); + &roce->alias_id, roce->key, false); if (err) { mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias FT err=%d\n", err); goto destroy_group; @@ -555,6 +565,9 @@ void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce, tx_roce = &ipsec_roce->tx; + + if (!tx_roce->ft) + return; /* In case RoCE was cleaned from MPV event flow */ + mlx5_del_flow_rules(tx_roce->rule); mlx5_destroy_flow_group(tx_roce->g); mlx5_destroy_flow_table(tx_roce->ft); @@ -575,11 +588,13 @@ void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce, mlx5_cmd_alias_obj_destroy(peer_priv->mdev, tx_roce->alias_id, MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS); mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom); + tx_roce->ft = NULL; } int
mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, - struct mlx5_flow_table *pol_ft) + struct mlx5_flow_table *pol_ft, + bool from_event) { struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_ipsec_tx_roce *roce; @@ -599,7 +614,7 @@ int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, return -ENOMEM; if (mlx5_core_is_mp_slave(mdev)) { - err = ipsec_fs_roce_tx_mpv_create(mdev, ipsec_roce, pol_ft, in); + err = ipsec_fs_roce_tx_mpv_create(mdev, ipsec_roce, pol_ft, in, from_event); goto free_in; } @@ -668,6 +683,8 @@ void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family, rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx : &ipsec_roce->ipv6_rx; + if (!rx_roce->ft) + return; /* In case RoCE was cleaned from MPV event flow */ if (is_mpv_slave) mlx5_del_flow_rules(rx_roce->nic_master_rule); @@ -679,6 +696,7 @@ void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family, mlx5_destroy_flow_group(rx_roce->roce_miss.group); mlx5_destroy_flow_group(rx_roce->g); mlx5_destroy_flow_table(rx_roce->ft); + rx_roce->ft = NULL; } int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, @@ -798,6 +816,17 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev, return err; } +bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev) +{ + if (!mlx5_core_mp_enabled(mdev)) + return true; + + if (ipsec_fs_create_alias_supported_one(mdev)) + return true; + + return false; +} + void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce) { kfree(ipsec_roce); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h index 435a480400e5..2a1af78309fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h @@ -21,9 +21,11 @@ void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce, struct mlx5_core_dev *mdev); int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev, struct mlx5_ipsec_fs *ipsec_roce, - struct mlx5_flow_table *pol_ft); + struct mlx5_flow_table *pol_ft, + bool from_event); void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce); struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev, struct mlx5_devcom_comp_dev **devcom); +bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev); #endif /* __MLX5_LIB_IPSEC_H__ */
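For reference, the access-key handling that lets the MPV event flow re-create the alias objects can be summarized as follows. This is a condensed sketch of ipsec_fs_create_aliased_ft() after this series (the final mlx5_cmd_alias_obj_create() call is not visible in the hunks above and is shown here as an assumption; error logging dropped):

	/* First creation (from_event == false): generate a fresh access key
	 * and grant the peer cross-vhca access to the table.  Replay from
	 * the MPV event flow (from_event == true): reuse the key saved at
	 * first creation, since the access grant is still in place.
	 */
	for (i = 0; i < ACCESS_KEY_LEN; i++)
		if (!from_event)
			alias_key[i] = get_random_u64() & 0xFF;

	memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN);
	if (!from_event) {
		ret = mlx5_cmd_allow_other_vhca_access(ibv_owner, &allow_attr);
		if (ret)
			return ret;
	}

	memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN);
	ret = mlx5_cmd_alias_obj_create(ibv_allowed, &alias_attr, obj_id);

Saving the key in struct mlx5_ipsec_rx_roce/mlx5_ipsec_tx_roce is what makes the replay possible: the allow-access object on the owner side survives the unbind, so only the alias on the allowed side needs to be re-created, and it must present the same key.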