diff mbox series

[net-next,8/8] octeontx2-pf: mcs: Introduce MACSEC hardware offloading

Message ID 1664199421-28858-9-git-send-email-sbhatta@marvell.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Headers show
Series Introduce macsec hardware offload for cn10k platform | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 2 maintainers not CCed: hkelam@marvell.com gakula@marvell.com
netdev/build_clang fail Errors and warnings before: 0 this patch: 29
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 14 this patch: 51
netdev/checkpatch warning WARNING: added, moved or deleted file(s), does MAINTAINERS need updating? WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 86 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Subbaraya Sundeep Sept. 26, 2022, 1:37 p.m. UTC
This patch introduces the macsec offload feature to the cn10k
PF netdev driver. The macsec offload ops for adding, deleting
and updating SecYs, SCs and SAs, and for reading stats, are
supported. XPN support will be added in later patches. Some stats
share the same hardware counter, so depending on the SecY mode the
same counter represents a different stat. Hence, when the SecY
mode/policy is changed, a snapshot of the current stats is captured.
Also, there is no provision to specify a unique flow-id/SCI per
packet to the hardware, so a different MAC address needs to be set
for each macsec interface.

Signed-off-by: Subbaraya Sundeep <sbhatta@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
---
 .../net/ethernet/marvell/octeontx2/nic/Makefile    |    1 +
 .../ethernet/marvell/octeontx2/nic/cn10k_macsec.c  | 1668 ++++++++++++++++++++
 .../ethernet/marvell/octeontx2/nic/otx2_common.c   |    1 +
 .../ethernet/marvell/octeontx2/nic/otx2_common.h   |   90 ++
 .../net/ethernet/marvell/octeontx2/nic/otx2_pf.c   |   16 +
 5 files changed, 1776 insertions(+)
 create mode 100644 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
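
A usage note on the MAC address constraint mentioned in the commit message: the TX flow match programmed by this patch keys on the interface's MAC source address rather than on a per-packet SCI, so each offloaded macsec netdev needs its own address. An illustrative iproute2 sequence (interface names, the address and the key are placeholders, not taken from this series) would look like:

        ip link add link eth0 macsec0 type macsec encrypt on
        ip link set dev macsec0 address 02:11:22:33:44:55   # unique MAC per macsec interface
        ip macsec add macsec0 tx sa 0 pn 1 on key 01 0123456789abcdef0123456789abcdef
        ip macsec offload macsec0 mac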

Comments

Jakub Kicinski Sept. 26, 2022, 8:01 p.m. UTC | #1
On Mon, 26 Sep 2022 19:07:01 +0530 Subbaraya Sundeep wrote:
> This patch introduces the macsec offload feature to the cn10k
> PF netdev driver. The macsec offload ops for adding, deleting
> and updating SecYs, SCs and SAs, and for reading stats, are
> supported. XPN support will be added in later patches. Some stats
> share the same hardware counter, so depending on the SecY mode the
> same counter represents a different stat. Hence, when the SecY
> mode/policy is changed, a snapshot of the current stats is captured.
> Also, there is no provision to specify a unique flow-id/SCI per
> packet to the hardware, so a different MAC address needs to be set
> for each macsec interface.

Does not build:

drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1258:36: error: no member named 'macsec_cfg' in 'struct otx2_nic'
        struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
                                    ~~~~  ^

Please make sure you wait at least 24h before reposting.
kernel test robot Sept. 27, 2022, 4:35 p.m. UTC | #2
Hi Subbaraya,

I love your patch! Yet something to improve:

[auto build test ERROR on net-next/master]

url:    https://github.com/intel-lab-lkp/linux/commits/Subbaraya-Sundeep/Introduce-macsec-hardware-offload-for-cn10k-platform/20220927-111449
base:   https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 5dcf41a8e928ca084e1c38ef51d067cd7587999f
config: ia64-allmodconfig
compiler: ia64-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/578004f814eb0e9fbe151db47175222752f6d618
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Subbaraya-Sundeep/Introduce-macsec-hardware-offload-for-cn10k-platform/20220927-111449
        git checkout 578004f814eb0e9fbe151db47175222752f6d618
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=ia64 SHELL=/bin/bash drivers/net/ethernet/marvell/octeontx2/nic/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mcs_secy_rx_cfg':
>> drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:923:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
     923 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mcs_disable_rxscs':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:957:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
     957 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mcs_sync_stats':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:986:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
     986 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_open':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1038:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1038 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_stop':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1062:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1062 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_add_secy':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1080:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1080 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_upd_secy':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1114:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1114 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_del_secy':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1144:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1144 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_add_txsa':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1164:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1164 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_upd_txsa':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1206:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1206 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_del_txsa':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1238:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1238 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_add_rxsc':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1258:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1258 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_upd_rxsc':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1292:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1292 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_del_rxsc':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1311:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1311 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_add_rxsa':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1330:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1330 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_upd_rxsa':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1370:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1370 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_del_rxsa':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1403:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1403 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_get_dev_stats':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1426:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1426 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_get_tx_sc_stats':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1461:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1461 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_get_tx_sa_stats':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1482:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1482 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_get_rx_sc_stats':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1505:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1505 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mdo_get_rx_sa_stats':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1548:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1548 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: At top level:
>> drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1593:6: error: redefinition of 'cn10k_handle_mcs_event'
    1593 | void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
         |      ^~~~~~~~~~~~~~~~~~~~~~
   In file included from drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:10:
   drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h:1033:20: note: previous definition of 'cn10k_handle_mcs_event' with type 'void(struct otx2_nic *, struct mcs_intr_info *)'
    1033 | static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
         |                    ^~~~~~~~~~~~~~~~~~~~~~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_handle_mcs_event':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1595:41: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1595 |         struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
         |                                         ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: At top level:
>> drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1620:5: error: redefinition of 'cn10k_mcs_init'
    1620 | int cn10k_mcs_init(struct otx2_nic *pfvf)
         |     ^~~~~~~~~~~~~~
   drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h:1031:19: note: previous definition of 'cn10k_mcs_init' with type 'int(struct otx2_nic *)'
    1031 | static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
         |                   ^~~~~~~~~~~~~~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mcs_init':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1635:13: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1635 |         pfvf->macsec_cfg = cfg;
         |             ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: At top level:
>> drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1659:6: error: redefinition of 'cn10k_mcs_free'
    1659 | void cn10k_mcs_free(struct otx2_nic *pfvf)
         |      ^~~~~~~~~~~~~~
   drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h:1032:20: note: previous definition of 'cn10k_mcs_free' with type 'void(struct otx2_nic *)'
    1032 | static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
         |                    ^~~~~~~~~~~~~~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c: In function 'cn10k_mcs_free':
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1666:19: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1666 |         kfree(pfvf->macsec_cfg);
         |                   ^~
   drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c:1667:13: error: 'struct otx2_nic' has no member named 'macsec_cfg'
    1667 |         pfvf->macsec_cfg = NULL;
         |             ^~


vim +923 drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c

   919	
   920	static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
   921					 struct macsec_secy *secy, u8 hw_secy_id)
   922	{
 > 923		struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
   924		struct cn10k_mcs_rxsc *mcs_rx_sc;
   925		struct macsec_rx_sc *sw_rx_sc;
   926		struct macsec_rx_sa *sw_rx_sa;
   927		u8 sa_num;
   928	
   929		for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
   930		     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
   931			mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
   932			if (unlikely(!mcs_rx_sc))
   933				continue;
   934	
   935			for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
   936				sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
   937				if (!sw_rx_sa)
   938					continue;
   939	
   940				cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
   941							   sa_num, sw_rx_sa->active);
   942				cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
   943							 sw_rx_sa->next_pn_halves.lower);
   944			}
   945	
   946			cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
   947			cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
   948		}
   949	
   950		return 0;
   951	}
   952
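
Both classes of errors above (the missing 'macsec_cfg' member and the redefinitions of cn10k_mcs_init/cn10k_mcs_free/cn10k_handle_mcs_event) mean cn10k_macsec.c is being compiled while otx2_common.h is still exposing only the !MACSEC stubs. That usually indicates the preprocessor guard in the header (for example a plain #ifdef CONFIG_MACSEC, which is false when CONFIG_MACSEC=m) does not match the Makefile condition that pulls in cn10k_macsec.o. The sketch below shows the guard layout the errors imply for otx2_common.h; it is an illustration of the pattern only, not the actual hunk from this series:

        /* Inside struct otx2_nic */
        #if IS_ENABLED(CONFIG_MACSEC)
                struct cn10k_mcs_cfg *macsec_cfg;
        #endif

        /* Alongside the other function declarations */
        #if IS_ENABLED(CONFIG_MACSEC)
        int cn10k_mcs_init(struct otx2_nic *pfvf);
        void cn10k_mcs_free(struct otx2_nic *pfvf);
        void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
        #else
        static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
        static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
        static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
                                                  struct mcs_intr_info *event) {}
        #endif /* CONFIG_MACSEC */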
diff mbox series

Patch

diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index d463dc72..73fdb87 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -13,5 +13,6 @@  rvu_nicvf-y := otx2_vf.o otx2_devlink.o
 
 rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
 rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
 
 ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
new file mode 100644
index 0000000..6cc6af9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -0,0 +1,1668 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MACSEC hardware offload driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/bitfield.h>
+#include <net/macsec.h>
+#include "otx2_common.h"
+
+#define MCS_TCAM0_MAC_SA_MASK		GENMASK_ULL(63, 48)
+#define MCS_TCAM1_MAC_SA_MASK		GENMASK_ULL(31, 0)
+#define MCS_TCAM1_ETYPE_MASK		GENMASK_ULL(47, 32)
+
+#define MCS_SA_MAP_MEM_SA_USE		BIT_ULL(9)
+
+#define MCS_RX_SECY_PLCY_RW_MASK	GENMASK_ULL(49, 18)
+#define MCS_RX_SECY_PLCY_RP		BIT_ULL(17)
+#define MCS_RX_SECY_PLCY_AUTH_ENA	BIT_ULL(16)
+#define MCS_RX_SECY_PLCY_CIP		GENMASK_ULL(8, 5)
+#define MCS_RX_SECY_PLCY_VAL		GENMASK_ULL(2, 1)
+#define MCS_RX_SECY_PLCY_ENA		BIT_ULL(0)
+
+#define MCS_TX_SECY_PLCY_MTU		GENMASK_ULL(43, 28)
+#define MCS_TX_SECY_PLCY_ST_TCI		GENMASK_ULL(27, 22)
+#define MCS_TX_SECY_PLCY_ST_OFFSET	GENMASK_ULL(21, 15)
+#define MCS_TX_SECY_PLCY_INS_MODE	BIT_ULL(14)
+#define MCS_TX_SECY_PLCY_AUTH_ENA	BIT_ULL(13)
+#define MCS_TX_SECY_PLCY_CIP		GENMASK_ULL(5, 2)
+#define MCS_TX_SECY_PLCY_PROTECT	BIT_ULL(1)
+#define MCS_TX_SECY_PLCY_ENA		BIT_ULL(0)
+
+#define MCS_GCM_AES_128			0
+#define MCS_GCM_AES_256			1
+#define MCS_GCM_AES_XPN_128		2
+#define MCS_GCM_AES_XPN_256		3
+
+#define MCS_TCI_ES			0x40 /* end station */
+#define MCS_TCI_SC			0x20 /* SCI present */
+#define MCS_TCI_SCB			0x10 /* epon */
+#define MCS_TCI_E			0x08 /* encryption */
+#define MCS_TCI_C			0x04 /* changed text */
+
+static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
+						 struct macsec_secy *secy)
+{
+	struct cn10k_mcs_txsc *txsc;
+
+	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+		if (txsc->sw_secy == secy)
+			return txsc;
+	}
+
+	return NULL;
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
+						 struct macsec_secy *secy,
+						 struct macsec_rx_sc *rx_sc)
+{
+	struct cn10k_mcs_rxsc *rxsc;
+
+	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+		if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
+			return rxsc;
+	}
+
+	return NULL;
+}
+
+static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
+{
+	switch (rsrc_type) {
+	case MCS_RSRC_TYPE_FLOWID:
+		return "FLOW";
+	case MCS_RSRC_TYPE_SC:
+		return "SC";
+	case MCS_RSRC_TYPE_SECY:
+		return "SECY";
+	case MCS_RSRC_TYPE_SA:
+		return "SA";
+	default:
+		return "Unknown";
+	};
+
+	return "Unknown";
+}
+
+static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+				enum mcs_rsrc_type type, u16 *rsrc_id)
+{
+	struct mbox *mbox = &pfvf->mbox;
+	struct mcs_alloc_rsrc_req *req;
+	struct mcs_alloc_rsrc_rsp *rsp;
+	int ret = -ENOMEM;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
+	if (!req)
+		goto fail;
+
+	req->rsrc_type = type;
+	req->rsrc_cnt  = 1;
+	req->dir = dir;
+
+	ret = otx2_sync_mbox_msg(mbox);
+	if (ret)
+		goto fail;
+
+	rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+							     0, &req->hdr);
+	if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
+	    req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	switch (rsp->rsrc_type) {
+	case MCS_RSRC_TYPE_FLOWID:
+		*rsrc_id = rsp->flow_ids[0];
+		break;
+	case MCS_RSRC_TYPE_SC:
+		*rsrc_id = rsp->sc_ids[0];
+		break;
+	case MCS_RSRC_TYPE_SECY:
+		*rsrc_id = rsp->secy_ids[0];
+		break;
+	case MCS_RSRC_TYPE_SA:
+		*rsrc_id = rsp->sa_ids[0];
+		break;
+	default:
+		ret = -EINVAL;
+		goto fail;
+	};
+
+	mutex_unlock(&mbox->lock);
+
+	return 0;
+fail:
+	dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
+		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+				enum mcs_rsrc_type type, u16 hw_rsrc_id,
+				bool all)
+{
+	struct mbox *mbox = &pfvf->mbox;
+	struct mcs_free_rsrc_req *req;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
+	if (!req)
+		goto fail;
+
+	req->rsrc_id = hw_rsrc_id;
+	req->rsrc_type = type;
+	req->dir = dir;
+	if (all)
+		req->all = 1;
+
+	if (otx2_sync_mbox_msg(&pfvf->mbox))
+		goto fail;
+
+	mutex_unlock(&mbox->lock);
+
+	return;
+fail:
+	dev_err(pfvf->dev, "Failed to free %s %s resource\n",
+		dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+	mutex_unlock(&mbox->lock);
+}
+
+static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+	return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+	return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
+				   struct macsec_secy *secy, u8 hw_secy_id)
+{
+	struct mcs_secy_plcy_write_req *req;
+	struct mbox *mbox = &pfvf->mbox;
+	u64 policy;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
+	if (secy->replay_protect)
+		policy |= MCS_RX_SECY_PLCY_RP;
+
+	policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
+	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+	policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);
+
+	policy |= MCS_RX_SECY_PLCY_ENA;
+
+	req->plcy = policy;
+	req->secy_id = hw_secy_id;
+	req->dir = MCS_RX;
+
+	ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+				     struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+	struct mcs_flowid_entry_write_req *req;
+	struct mbox *mbox = &pfvf->mbox;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
+	req->mask[1] = ~0ULL;
+	req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
+
+	req->mask[0] = ~0ULL;
+	req->mask[2] = ~0ULL;
+	req->mask[3] = ~0ULL;
+
+	req->flow_id = rxsc->hw_flow_id;
+	req->secy_id = hw_secy_id;
+	req->sc_id = rxsc->hw_sc_id;
+	req->dir = MCS_RX;
+
+	if (sw_rx_sc->active)
+		req->ena = 1;
+
+	ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
+				  struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+	struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+	struct mcs_rx_sc_cam_write_req *sc_req;
+	struct mbox *mbox = &pfvf->mbox;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
+	if (!sc_req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	sc_req->sci = cpu_to_be64(sw_rx_sc->sci);
+	sc_req->sc_id = rxsc->hw_sc_id;
+	sc_req->secy_id = hw_secy_id;
+
+	ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
+				      struct macsec_secy *secy,
+				      struct cn10k_mcs_rxsc *rxsc,
+				      u8 assoc_num, bool sa_in_use)
+{
+	unsigned char *src = rxsc->sa_key[assoc_num];
+	struct mcs_sa_plcy_write_req *plcy_req;
+	struct mcs_rx_sc_sa_map *map_req;
+	struct mbox *mbox = &pfvf->mbox;
+	u8 reg, key_len;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+	if (!plcy_req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
+	if (!map_req) {
+		otx2_mbox_reset(&mbox->mbox, 0);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+		memcpy((u8 *)&plcy_req->plcy[0][reg],
+		       (src + reg * 8), 8);
+		reg++;
+	}
+
+	plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
+	plcy_req->sa_cnt = 1;
+	plcy_req->dir = MCS_RX;
+
+	map_req->sa_index = rxsc->hw_sa_id[assoc_num];
+	map_req->sa_in_use = sa_in_use;
+	map_req->sc_id = rxsc->hw_sc_id;
+	map_req->an = assoc_num;
+
+	/* Send two messages together */
+	ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
+				    struct cn10k_mcs_rxsc *rxsc,
+				    u8 assoc_num, u64 next_pn)
+{
+	struct mcs_pn_table_write_req *req;
+	struct mbox *mbox = &pfvf->mbox;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	req->pn_id = rxsc->hw_sa_id[assoc_num];
+	req->next_pn = next_pn;
+	req->dir = MCS_RX;
+
+	ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
+				   struct macsec_secy *secy,
+				   struct cn10k_mcs_txsc *txsc)
+{
+	struct mcs_secy_plcy_write_req *req;
+	struct mbox *mbox = &pfvf->mbox;
+	struct macsec_tx_sc *sw_tx_sc;
+	/* Insert SecTag after 12 bytes (DA+SA)*/
+	u8 tag_offset = 12;
+	u8 sectag_tci = 0;
+	u64 policy;
+	int ret;
+
+	sw_tx_sc = &secy->tx_sc;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (sw_tx_sc->send_sci) {
+		sectag_tci |= MCS_TCI_SC;
+	} else {
+		if (sw_tx_sc->end_station)
+			sectag_tci |= MCS_TCI_ES;
+		if (sw_tx_sc->scb)
+			sectag_tci |= MCS_TCI_SCB;
+	}
+
+	if (sw_tx_sc->encrypt)
+		sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
+
+	policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+	/* Write SecTag excluding AN bits(1..0) */
+	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
+	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
+	policy |= MCS_TX_SECY_PLCY_INS_MODE;
+	policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
+	policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+
+	if (secy->protect_frames)
+		policy |= MCS_TX_SECY_PLCY_PROTECT;
+
+	/* If the encoding SA does not exist or is not active and protect
+	 * is not set, then frames can be sent out as is. Hence enable the
+	 * policy irrespective of the secy operational state when !protect.
+	 */
+	if (!secy->protect_frames || secy->operational)
+		policy |= MCS_TX_SECY_PLCY_ENA;
+
+	req->plcy = policy;
+	req->secy_id = txsc->hw_secy_id_tx;
+	req->dir = MCS_TX;
+
+	ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
+				     struct macsec_secy *secy,
+				     struct cn10k_mcs_txsc *txsc)
+{
+	struct mcs_flowid_entry_write_req *req;
+	struct mbox *mbox = &pfvf->mbox;
+	u64 mac_sa;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);
+
+	req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
+	req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);
+
+	req->mask[0] = ~0ULL;
+	req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;
+
+	req->mask[1] = ~0ULL;
+	req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;
+
+	req->mask[2] = ~0ULL;
+	req->mask[3] = ~0ULL;
+
+	req->flow_id = txsc->hw_flow_id;
+	req->secy_id = txsc->hw_secy_id_tx;
+	req->sc_id = txsc->hw_sc_id;
+	req->sci = cpu_to_be64(secy->sci);
+	req->dir = MCS_TX;
+	/* This can be enabled since stack xmits packets only when interface is up */
+	req->ena = 1;
+
+	ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
+				   struct macsec_secy *secy,
+				   struct cn10k_mcs_txsc *txsc,
+				   u8 sa_num, bool sa_active)
+{
+	struct mcs_tx_sc_sa_map *map_req;
+	struct mbox *mbox = &pfvf->mbox;
+	int ret;
+
+	/* Link the encoding_sa only to SC out of all SAs */
+	if (txsc->encoding_sa != sa_num)
+		return 0;
+
+	mutex_lock(&mbox->lock);
+
+	map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
+	if (!map_req) {
+		otx2_mbox_reset(&mbox->mbox, 0);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	map_req->sa_index0 = txsc->hw_sa_id[sa_num];
+	map_req->sa_index0_vld = sa_active;
+	map_req->sectag_sci = cpu_to_be64(secy->sci);
+	map_req->sc_id = txsc->hw_sc_id;
+
+	ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
+				      struct macsec_secy *secy,
+				      struct cn10k_mcs_txsc *txsc,
+				      u8 assoc_num)
+{
+	unsigned char *src = txsc->sa_key[assoc_num];
+	struct mcs_sa_plcy_write_req *plcy_req;
+	struct mbox *mbox = &pfvf->mbox;
+	u8 reg, key_len;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+	if (!plcy_req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+		memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
+		reg++;
+	}
+
+	plcy_req->plcy[0][8] = assoc_num;
+	plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
+	plcy_req->sa_cnt = 1;
+	plcy_req->dir = MCS_TX;
+
+	ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
+				struct cn10k_mcs_txsc *txsc,
+				u8 assoc_num, u64 next_pn)
+{
+	struct mcs_pn_table_write_req *req;
+	struct mbox *mbox = &pfvf->mbox;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	req->pn_id = txsc->hw_sa_id[assoc_num];
+	req->next_pn = next_pn;
+	req->dir = MCS_TX;
+
+	ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
+				    bool enable, enum mcs_direction dir)
+{
+	struct mcs_flowid_ena_dis_entry *req;
+	struct mbox *mbox = &pfvf->mbox;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	req->flow_id = hw_flow_id;
+	req->ena = enable;
+	req->dir = dir;
+
+	ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
+			      struct mcs_sa_stats *rsp_p,
+			      enum mcs_direction dir, bool clear)
+{
+	struct mcs_clear_stats *clear_req;
+	struct mbox *mbox = &pfvf->mbox;
+	struct mcs_stats_req *req;
+	struct mcs_sa_stats *rsp;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	req->id = hw_sa_id;
+	req->dir = dir;
+
+	if (!clear)
+		goto send_msg;
+
+	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+	if (!clear_req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	clear_req->id = hw_sa_id;
+	clear_req->dir = dir;
+	clear_req->type = MCS_RSRC_TYPE_SA;
+
+send_msg:
+	ret = otx2_sync_mbox_msg(mbox);
+	if (ret)
+		goto fail;
+
+	rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+						       0, &req->hdr);
+	if (IS_ERR(rsp)) {
+		ret = PTR_ERR(rsp);
+		goto fail;
+	}
+
+	memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+	mutex_unlock(&mbox->lock);
+
+	return 0;
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
+			      struct mcs_sc_stats *rsp_p,
+			      enum mcs_direction dir, bool clear)
+{
+	struct mcs_clear_stats *clear_req;
+	struct mbox *mbox = &pfvf->mbox;
+	struct mcs_stats_req *req;
+	struct mcs_sc_stats *rsp;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	req->id = hw_sc_id;
+	req->dir = dir;
+
+	if (!clear)
+		goto send_msg;
+
+	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+	if (!clear_req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	clear_req->id = hw_sc_id;
+	clear_req->dir = dir;
+	clear_req->type = MCS_RSRC_TYPE_SC;
+
+send_msg:
+	ret = otx2_sync_mbox_msg(mbox);
+	if (ret)
+		goto fail;
+
+	rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+						       0, &req->hdr);
+	if (IS_ERR(rsp)) {
+		ret = PTR_ERR(rsp);
+		goto fail;
+	}
+
+	memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+	mutex_unlock(&mbox->lock);
+
+	return 0;
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
+				struct mcs_secy_stats *rsp_p,
+				enum mcs_direction dir, bool clear)
+{
+	struct mcs_clear_stats *clear_req;
+	struct mbox *mbox = &pfvf->mbox;
+	struct mcs_secy_stats *rsp;
+	struct mcs_stats_req *req;
+	int ret;
+
+	mutex_lock(&mbox->lock);
+
+	req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
+	if (!req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	req->id = hw_secy_id;
+	req->dir = dir;
+
+	if (!clear)
+		goto send_msg;
+
+	clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+	if (!clear_req) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	clear_req->id = hw_secy_id;
+	clear_req->dir = dir;
+	clear_req->type = MCS_RSRC_TYPE_SECY;
+
+send_msg:
+	ret = otx2_sync_mbox_msg(mbox);
+	if (ret)
+		goto fail;
+
+	rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+							 0, &req->hdr);
+	if (IS_ERR(rsp)) {
+		ret = PTR_ERR(rsp);
+		goto fail;
+	}
+
+	memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+	mutex_unlock(&mbox->lock);
+
+	return 0;
+fail:
+	mutex_unlock(&mbox->lock);
+	return ret;
+}
+
+static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
+{
+	struct cn10k_mcs_txsc *txsc;
+	int ret;
+
+	txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
+	if (!txsc)
+		return ERR_PTR(-ENOMEM);
+
+	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+				   &txsc->hw_flow_id);
+	if (ret)
+		goto fail;
+
+	/* For a SecY, one TX secy and one RX secy HW resources are needed */
+	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+				   &txsc->hw_secy_id_tx);
+	if (ret)
+		goto free_flowid;
+
+	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+				   &txsc->hw_secy_id_rx);
+	if (ret)
+		goto free_tx_secy;
+
+	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+				   &txsc->hw_sc_id);
+	if (ret)
+		goto free_rx_secy;
+
+	return txsc;
+free_rx_secy:
+	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+			    txsc->hw_secy_id_rx, false);
+free_tx_secy:
+	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+			    txsc->hw_secy_id_tx, false);
+free_flowid:
+	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+			    txsc->hw_flow_id, false);
+fail:
+	return ERR_PTR(ret);
+}
+
+/* Free Tx SC and its SAs(if any) resources to AF
+ */
+static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
+				  struct cn10k_mcs_txsc *txsc)
+{
+	u8 sa_bmap = txsc->sa_bmap;
+	u8 sa_num = 0;
+
+	while (sa_bmap) {
+		if (sa_bmap & 1) {
+			cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
+						   txsc, sa_num);
+			cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+		}
+		sa_num++;
+		sa_bmap >>= 1;
+	}
+
+	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+			    txsc->hw_sc_id, false);
+	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+			    txsc->hw_secy_id_rx, false);
+	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+			    txsc->hw_secy_id_tx, false);
+	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+			    txsc->hw_flow_id, false);
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
+{
+	struct cn10k_mcs_rxsc *rxsc;
+	int ret;
+
+	rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
+	if (!rxsc)
+		return ERR_PTR(-ENOMEM);
+
+	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+				   &rxsc->hw_flow_id);
+	if (ret)
+		goto fail;
+
+	ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+				   &rxsc->hw_sc_id);
+	if (ret)
+		goto free_flowid;
+
+	return rxsc;
+free_flowid:
+	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+			    rxsc->hw_flow_id, false);
+fail:
+	return ERR_PTR(ret);
+}
+
+/* Free Rx SC and its SAs(if any) resources to AF
+ */
+static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
+				  struct cn10k_mcs_rxsc *rxsc)
+{
+	u8 sa_bmap = rxsc->sa_bmap;
+	u8 sa_num = 0;
+
+	while (sa_bmap) {
+		if (sa_bmap & 1) {
+			cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
+						   sa_num, false);
+			cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+		}
+		sa_num++;
+		sa_bmap >>= 1;
+	}
+
+	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+			    rxsc->hw_sc_id, false);
+	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+			    rxsc->hw_flow_id, false);
+}
+
+static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
+				 struct cn10k_mcs_txsc *txsc,
+				 struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
+{
+	if (sw_tx_sa) {
+		cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+		cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+				     sw_tx_sa->next_pn_halves.lower);
+		cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
+					sw_tx_sa->active);
+	}
+
+	cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
+	cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
+	/* When updating secy, change RX secy also */
+	cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);
+
+	return 0;
+}
+
+static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
+				 struct macsec_secy *secy, u8 hw_secy_id)
+{
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct cn10k_mcs_rxsc *mcs_rx_sc;
+	struct macsec_rx_sc *sw_rx_sc;
+	struct macsec_rx_sa *sw_rx_sa;
+	u8 sa_num;
+
+	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+		if (unlikely(!mcs_rx_sc))
+			continue;
+
+		for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
+			sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
+			if (!sw_rx_sa)
+				continue;
+
+			cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
+						   sa_num, sw_rx_sa->active);
+			cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
+						 sw_rx_sa->next_pn_halves.lower);
+		}
+
+		cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
+		cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
+	}
+
+	return 0;
+}
+
+static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
+				   struct macsec_secy *secy,
+				   bool delete)
+{
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct cn10k_mcs_rxsc *mcs_rx_sc;
+	struct macsec_rx_sc *sw_rx_sc;
+	int ret;
+
+	for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+	     sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+		mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+		if (unlikely(!mcs_rx_sc))
+			continue;
+
+		ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
+					       false, MCS_RX);
+		if (ret)
+			dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
+				mcs_rx_sc->hw_sc_id);
+		if (delete) {
+			cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
+			list_del(&mcs_rx_sc->entry);
+			kfree(mcs_rx_sc);
+		}
+	}
+
+	return 0;
+}
+
+static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
+				 struct cn10k_mcs_txsc *txsc)
+{
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct mcs_secy_stats rx_rsp = { 0 };
+	struct mcs_sc_stats sc_rsp = { 0 };
+	struct cn10k_mcs_rxsc *rxsc;
+
+	/* Because of shared counters for some stats in the hardware, when
+	 * updating the secy policy take a snapshot of the current stats and
+	 * reset them. Below are the stats affected by the shared counters.
+	 */
+
+	/* Check if sync is really needed */
+	if (secy->validate_frames == txsc->last_validate_frames &&
+	    secy->protect_frames == txsc->last_protect_frames)
+		return;
+
+	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+
+	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+	if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
+		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+	else
+		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+
+	list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+		cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);
+
+		rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
+		rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;
+
+		rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
+		rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
+
+		if (txsc->last_protect_frames)
+			rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
+		else
+			rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
+
+		if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
+			rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
+		else
+			rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
+	}
+
+	txsc->last_validate_frames = secy->validate_frames;
+	txsc->last_protect_frames = secy->protect_frames;
+}
+
+static int cn10k_mdo_open(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_secy *secy = ctx->secy;
+	struct macsec_tx_sa *sw_tx_sa;
+	struct cn10k_mcs_txsc *txsc;
+	u8 sa_num;
+	int err;
+
+	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+	if (!txsc)
+		return -ENOENT;
+
+	sa_num = txsc->encoding_sa;
+	sw_tx_sa = secy->tx_sc.sa[sa_num];
+
+	err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+	if (err)
+		return err;
+
+	return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
+}
+
+static int cn10k_mdo_stop(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct cn10k_mcs_txsc *txsc;
+	int err;
+
+	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+	if (!txsc)
+		return -ENOENT;
+
+	err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+	if (err)
+		return err;
+
+	return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
+}
+
+static int cn10k_mdo_add_secy(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_secy *secy = ctx->secy;
+	struct cn10k_mcs_txsc *txsc;
+
+	if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+		return -EOPNOTSUPP;
+
+	/* Stick to 16 bytes key len until XPN support is added */
+	if (secy->key_len != 16)
+		return -EOPNOTSUPP;
+
+	if (secy->xpn)
+		return -EOPNOTSUPP;
+
+	txsc = cn10k_mcs_create_txsc(pfvf);
+	if (IS_ERR(txsc))
+		return -ENOSPC;
+
+	txsc->sw_secy = secy;
+	txsc->encoding_sa = secy->tx_sc.encoding_sa;
+	txsc->last_validate_frames = secy->validate_frames;
+	txsc->last_protect_frames = secy->protect_frames;
+
+	list_add(&txsc->entry, &cfg->txsc_list);
+
+	if (netif_running(secy->netdev))
+		return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+
+	return 0;
+}
+
+static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_secy *secy = ctx->secy;
+	struct macsec_tx_sa *sw_tx_sa;
+	struct cn10k_mcs_txsc *txsc;
+	u8 sa_num;
+	int err;
+
+	txsc = cn10k_mcs_get_txsc(cfg, secy);
+	if (!txsc)
+		return -ENOENT;
+
+	txsc->encoding_sa = secy->tx_sc.encoding_sa;
+
+	sa_num = txsc->encoding_sa;
+	sw_tx_sa = secy->tx_sc.sa[sa_num];
+
+	if (netif_running(secy->netdev)) {
+		cn10k_mcs_sync_stats(pfvf, secy, txsc);
+
+		err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int cn10k_mdo_del_secy(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct cn10k_mcs_txsc *txsc;
+
+	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+	if (!txsc)
+		return -ENOENT;
+
+	cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+	cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
+	cn10k_mcs_delete_txsc(pfvf, txsc);
+	list_del(&txsc->entry);
+	kfree(txsc);
+
+	return 0;
+}
+
+static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_secy *secy = ctx->secy;
+	u8 sa_num = ctx->sa.assoc_num;
+	struct cn10k_mcs_txsc *txsc;
+	int err;
+
+	txsc = cn10k_mcs_get_txsc(cfg, secy);
+	if (!txsc)
+		return -ENOENT;
+
+	if (sa_num >= CN10K_MCS_SA_PER_SC)
+		return -EOPNOTSUPP;
+
+	if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
+		return -ENOSPC;
+
+	memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
+	txsc->sa_bmap |= 1 << sa_num;
+
+	if (netif_running(secy->netdev)) {
+		err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+		if (err)
+			return err;
+
+		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+					   sw_tx_sa->next_pn_halves.lower);
+		if (err)
+			return err;
+
+		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+					      sa_num, sw_tx_sa->active);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_secy *secy = ctx->secy;
+	u8 sa_num = ctx->sa.assoc_num;
+	struct cn10k_mcs_txsc *txsc;
+	int err;
+
+	txsc = cn10k_mcs_get_txsc(cfg, secy);
+	if (!txsc)
+		return -ENOENT;
+
+	if (sa_num >= CN10K_MCS_SA_PER_SC)
+		return -EOPNOTSUPP;
+
+	if (netif_running(secy->netdev)) {
+		/* Keys cannot be changed after creation */
+		err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+					   sw_tx_sa->next_pn_halves.lower);
+		if (err)
+			return err;
+
+		err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+					      sa_num, sw_tx_sa->active);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	u8 sa_num = ctx->sa.assoc_num;
+	struct cn10k_mcs_txsc *txsc;
+
+	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+	if (!txsc)
+		return -ENOENT;
+
+	if (sa_num >= CN10K_MCS_SA_PER_SC)
+		return -EOPNOTSUPP;
+
+	cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+	txsc->sa_bmap &= ~(1 << sa_num);
+
+	return 0;
+}
+
+static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_secy *secy = ctx->secy;
+	struct cn10k_mcs_rxsc *rxsc;
+	struct cn10k_mcs_txsc *txsc;
+	int err;
+
+	txsc = cn10k_mcs_get_txsc(cfg, secy);
+	if (!txsc)
+		return -ENOENT;
+
+	rxsc = cn10k_mcs_create_rxsc(pfvf);
+	if (IS_ERR(rxsc))
+		return -ENOSPC;
+
+	rxsc->sw_secy = ctx->secy;
+	rxsc->sw_rxsc = ctx->rx_sc;
+	list_add(&rxsc->entry, &cfg->rxsc_list);
+
+	if (netif_running(secy->netdev)) {
+		err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
+		if (err)
+			return err;
+
+		err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_secy *secy = ctx->secy;
+	bool enable = ctx->rx_sc->active;
+	struct cn10k_mcs_rxsc *rxsc;
+
+	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+	if (!rxsc)
+		return -ENOENT;
+
+	if (netif_running(secy->netdev))
+		return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
+						enable, MCS_RX);
+
+	return 0;
+}
+
+static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct cn10k_mcs_rxsc *rxsc;
+
+	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
+	if (!rxsc)
+		return -ENOENT;
+
+	cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
+	cn10k_mcs_delete_rxsc(pfvf, rxsc);
+	list_del(&rxsc->entry);
+	kfree(rxsc);
+
+	return 0;
+}
+
+static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
+{
+	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+	u64 next_pn = rx_sa->next_pn_halves.lower;
+	struct macsec_secy *secy = ctx->secy;
+	bool sa_in_use = rx_sa->active;
+	u8 sa_num = ctx->sa.assoc_num;
+	struct cn10k_mcs_rxsc *rxsc;
+	int err;
+
+	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+	if (!rxsc)
+		return -ENOENT;
+
+	if (sa_num >= CN10K_MCS_SA_PER_SC)
+		return -EOPNOTSUPP;
+
+	if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
+		return -ENOSPC;
+
+	memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
+	rxsc->sa_bmap |= 1 << sa_num;
+
+	if (netif_running(secy->netdev)) {
+		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
+						 sa_num, sa_in_use);
+		if (err)
+			return err;
+
+		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+	u64 next_pn = rx_sa->next_pn_halves.lower;
+	struct macsec_secy *secy = ctx->secy;
+	bool sa_in_use = rx_sa->active;
+	u8 sa_num = ctx->sa.assoc_num;
+	struct cn10k_mcs_rxsc *rxsc;
+	int err;
+
+	rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+	if (!rxsc)
+		return -ENOENT;
+
+	if (sa_num >= CN10K_MCS_SA_PER_SC)
+		return -EOPNOTSUPP;
+
+	if (netif_running(secy->netdev)) {
+		err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
+		if (err)
+			return err;
+
+		err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
+{
+	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	u8 sa_num = ctx->sa.assoc_num;
+	struct cn10k_mcs_rxsc *rxsc;
+
+	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+	if (!rxsc)
+		return -ENOENT;
+
+	if (sa_num >= CN10K_MCS_SA_PER_SC)
+		return -EOPNOTSUPP;
+
+	cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
+	cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+
+	rxsc->sa_bmap &= ~(1 << sa_num);
+
+	return 0;
+}
+
+static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+	struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_secy *secy = ctx->secy;
+	struct cn10k_mcs_txsc *txsc;
+
+	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+	if (!txsc)
+		return -ENOENT;
+
+	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
+	ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
+	ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;
+
+	cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+	txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+	txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+	txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+	if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
+		txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+	else
+		txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+	txsc->stats.InPktsOverrun = 0;
+
+	ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
+	ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
+	ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
+	ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
+	ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
+	ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;
+
+	return 0;
+}
+
+static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct mcs_sc_stats rsp = { 0 };
+	struct cn10k_mcs_txsc *txsc;
+
+	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+	if (!txsc)
+		return -ENOENT;
+
+	cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);
+
+	ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+	ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+	ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
+	ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;
+
+	return 0;
+}
+
+static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct mcs_sa_stats rsp = { 0 };
+	u8 sa_num = ctx->sa.assoc_num;
+	struct cn10k_mcs_txsc *txsc;
+
+	txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+	if (!txsc)
+		return -ENOENT;
+
+	if (sa_num >= CN10K_MCS_SA_PER_SC)
+		return -EOPNOTSUPP;
+
+	cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);
+
+	ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+	ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+
+	return 0;
+}
+
+static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_secy *secy = ctx->secy;
+	struct mcs_sc_stats rsp = { 0 };
+	struct cn10k_mcs_rxsc *rxsc;
+
+	rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+	if (!rxsc)
+		return -ENOENT;
+
+	cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);
+
+	rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
+	rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;
+
+	rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
+	rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
+
+	if (secy->protect_frames)
+		rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
+	else
+		rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
+
+	if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
+		rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
+	else
+		rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
+
+	ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
+	ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
+	ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
+	ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
+	ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
+	ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
+	ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
+	ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;
+
+	return 0;
+}
+
+static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+	struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+	struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct mcs_sa_stats rsp = { 0 };
+	u8 sa_num = ctx->sa.assoc_num;
+	struct cn10k_mcs_rxsc *rxsc;
+
+	rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+	if (!rxsc)
+		return -ENOENT;
+
+	if (sa_num >= CN10K_MCS_SA_PER_SC)
+		return -EOPNOTSUPP;
+
+	cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);
+
+	ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
+	ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
+	ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
+	ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
+	ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;
+
+	return 0;
+}
+
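+/* MACsec offload callbacks invoked by the macsec core driver */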
+static const struct macsec_ops cn10k_mcs_ops = {
+	.mdo_dev_open = cn10k_mdo_open,
+	.mdo_dev_stop = cn10k_mdo_stop,
+	.mdo_add_secy = cn10k_mdo_add_secy,
+	.mdo_upd_secy = cn10k_mdo_upd_secy,
+	.mdo_del_secy = cn10k_mdo_del_secy,
+	.mdo_add_rxsc = cn10k_mdo_add_rxsc,
+	.mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
+	.mdo_del_rxsc = cn10k_mdo_del_rxsc,
+	.mdo_add_rxsa = cn10k_mdo_add_rxsa,
+	.mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
+	.mdo_del_rxsa = cn10k_mdo_del_rxsa,
+	.mdo_add_txsa = cn10k_mdo_add_txsa,
+	.mdo_upd_txsa = cn10k_mdo_upd_txsa,
+	.mdo_del_txsa = cn10k_mdo_del_txsa,
+	.mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
+	.mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
+	.mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
+	.mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
+	.mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
+};
+
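+/* Handle the MCS interrupt notification sent by AF: find the TX SA whose
+ * packet number has wrapped and report it to the macsec core.
+ */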
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
+{
+	struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+	struct macsec_tx_sa *sw_tx_sa = NULL;
+	struct macsec_secy *secy = NULL;
+	struct cn10k_mcs_txsc *txsc;
+	u8 an;
+
+	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+		return;
+
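+	/* Only the TX PN wrap interrupt is requested in cn10k_mcs_init() */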
+	if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
+		return;
+
+	/* Find the SecY to which the expired hardware SA is mapped */
+	list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+		for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
+			if (txsc->hw_sa_id[an] == event->sa_id) {
+				secy = txsc->sw_secy;
+				sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
+			}
+	}
+
+	if (secy && sw_tx_sa)
+		macsec_pn_wrapped(secy, sw_tx_sa);
+}
+
+int cn10k_mcs_init(struct otx2_nic *pfvf)
+{
+	struct mbox *mbox = &pfvf->mbox;
+	struct cn10k_mcs_cfg *cfg;
+	struct mcs_intr_cfg *req;
+
+	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+		return 0;
+
+	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&cfg->txsc_list);
+	INIT_LIST_HEAD(&cfg->rxsc_list);
+	pfvf->macsec_cfg = cfg;
+
+	pfvf->netdev->features |= NETIF_F_HW_MACSEC;
+	pfvf->netdev->macsec_ops = &cn10k_mcs_ops;
+
+	mutex_lock(&mbox->lock);
+
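+	/* Request notification when a TX SA packet number wraps to zero */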
+	req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
+	if (!req)
+		goto fail;
+
+	req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+	if (otx2_sync_mbox_msg(mbox))
+		goto fail;
+
+	mutex_unlock(&mbox->lock);
+
+	return 0;
+fail:
+	dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
+	mutex_unlock(&mbox->lock);
+	return 0;
+}
+
+void cn10k_mcs_free(struct otx2_nic *pfvf)
+{
+	if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+		return;
+
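+	/* Free TX and RX SecY resources reserved in hardware */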
+	cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
+	cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
+	kfree(pfvf->macsec_cfg);
+	pfvf->macsec_cfg = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index bc3e6aae..9ac9e66 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1827,4 +1827,5 @@  otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
 }									\
 EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
 MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
 #undef M
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 4c7691a..cc1a6ad2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -19,6 +19,7 @@ 
 #include <net/devlink.h>
 #include <linux/time64.h>
 #include <linux/dim.h>
+#include <uapi/linux/if_macsec.h>
 
 #include <mbox.h>
 #include <npc.h>
@@ -33,6 +34,7 @@ 
 #define PCI_DEVID_OCTEONTX2_RVU_AFVF		0xA0F8
 
 #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200
+#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF	0xBD00
 
 /* PCI BAR nos */
 #define PCI_CFG_REG_BAR_NUM                     2
@@ -244,6 +246,7 @@  struct otx2_hw {
 #define CN10K_LMTST		2
 #define CN10K_RPM		3
 #define CN10K_PTP_ONESTEP	4
+#define CN10K_HW_MACSEC		5
 	unsigned long		cap_flag;
 
 #define LMT_LINE_SIZE		128
@@ -351,6 +354,66 @@  struct dev_hw_ops {
 	void	(*aura_freeptr)(void *dev, int aura, u64 buf);
 };
 
+#define CN10K_MCS_SA_PER_SC	4
+
+/* Stats which need to be accumulated in software because
+ * of shared counters in hardware.
+ */
+struct cn10k_txsc_stats {
+	u64 InPktsUntagged;
+	u64 InPktsNoTag;
+	u64 InPktsBadTag;
+	u64 InPktsUnknownSCI;
+	u64 InPktsNoSCI;
+	u64 InPktsOverrun;
+};
+
+struct cn10k_rxsc_stats {
+	u64 InOctetsValidated;
+	u64 InOctetsDecrypted;
+	u64 InPktsUnchecked;
+	u64 InPktsDelayed;
+	u64 InPktsOK;
+	u64 InPktsInvalid;
+	u64 InPktsLate;
+	u64 InPktsNotValid;
+	u64 InPktsNotUsingSA;
+	u64 InPktsUnusedSA;
+};
+
+struct cn10k_mcs_txsc {
+	struct macsec_secy *sw_secy;
+	struct cn10k_txsc_stats stats;
+	struct list_head entry;
+	enum macsec_validation_type last_validate_frames;
+	bool last_protect_frames;
+	u16 hw_secy_id_tx;
+	u16 hw_secy_id_rx;
+	u16 hw_flow_id;
+	u16 hw_sc_id;
+	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+	u8 sa_bmap;
+	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+	u8 encoding_sa;
+};
+
+struct cn10k_mcs_rxsc {
+	struct macsec_secy *sw_secy;
+	struct macsec_rx_sc *sw_rxsc;
+	struct cn10k_rxsc_stats stats;
+	struct list_head entry;
+	u16 hw_flow_id;
+	u16 hw_sc_id;
+	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+	u8 sa_bmap;
+	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+};
+
+struct cn10k_mcs_cfg {
+	struct list_head txsc_list;
+	struct list_head rxsc_list;
+};
+
 struct otx2_nic {
 	void __iomem		*reg_base;
 	struct net_device	*netdev;
@@ -438,6 +501,10 @@  struct otx2_nic {
 
 	/* napi event count. It is needed for adaptive irq coalescing. */
 	u32 napi_events;
+
+#ifdef CONFIG_MACSEC
+	struct cn10k_mcs_cfg	*macsec_cfg;
+#endif
 };
 
 static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -477,6 +544,11 @@  static inline bool is_dev_otx2(struct pci_dev *pdev)
 		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
 }
 
+static inline bool is_dev_cn10kb(struct pci_dev *pdev)
+{
+	return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
+}
+
 static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
 {
 	struct otx2_hw *hw = &pfvf->hw;
@@ -508,6 +580,9 @@  static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
 		__set_bit(CN10K_RPM, &hw->cap_flag);
 		__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
 	}
+
+	if (is_dev_cn10kb(pfvf->pdev))
+		__set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
 }
 
 /* Register read/write APIs */
@@ -763,6 +838,7 @@  otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
 				struct _rsp_type *rsp);			\
 
 MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
 #undef M
 
 /* Time to wait before watchdog kicks off */
@@ -945,4 +1021,18 @@  int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
 int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
 int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
 #endif
+
+#ifdef CONFIG_MACSEC
+/* MACSEC offload support */
+int cn10k_mcs_init(struct otx2_nic *pfvf);
+void cn10k_mcs_free(struct otx2_nic *pfvf);
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
+#else
+static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
+static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
+static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
+					  struct mcs_intr_info *event)
+{}
+#endif /* CONFIG_MACSEC */
+
 #endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 88ce472..2d59fe5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -858,6 +858,15 @@  static void otx2_handle_link_event(struct otx2_nic *pf)
 	}
 }
 
+int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
+					 struct mcs_intr_info *event,
+					 struct msg_rsp *rsp)
+{
+	cn10k_handle_mcs_event(pf, event);
+
+	return 0;
+}
+
 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
 					struct cgx_link_info_msg *msg,
 					struct msg_rsp *rsp)
@@ -917,6 +926,7 @@  static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
 		return err;						\
 	}
 MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
 #undef M
 		break;
 	default:
@@ -2765,6 +2775,10 @@  static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_ptp_destroy;
 
+	err = cn10k_mcs_init(pf);
+	if (err)
+		goto err_del_mcam_entries;
+
 	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
 		netdev->hw_features |= NETIF_F_NTUPLE;
 
@@ -2979,6 +2993,8 @@  static void otx2_remove(struct pci_dev *pdev)
 		otx2_config_pause_frm(pf);
 	}
 
+	cn10k_mcs_free(pf);
+
 #ifdef CONFIG_DCB
 	/* Disable PFC config */
 	if (pf->pfc_en) {