@@ -144,6 +144,7 @@ M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \
M(GET_REP_CNT, 0x00d, get_rep_cnt, msg_req, get_rep_cnt_rsp) \
+M(ESW_CFG, 0x00e, esw_cfg, esw_cfg_req, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -1532,6 +1533,12 @@ struct get_rep_cnt_rsp {
u64 rsvd;
};
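+/* Enable or disable eswitch (representor) mode in the AF */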
+struct esw_cfg_req {
+ struct mbox_msghdr hdr;
+ u8 ena;
+ u64 rsvd;
+};
+
struct flow_msg {
unsigned char dmac[6];
unsigned char smac[6];
@@ -596,6 +596,7 @@ struct rvu {
u16 rep_pcifunc;
int rep_cnt;
u16 *rep2pfvf_map;
+ u8 rep_mode;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -1025,7 +1026,7 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
/* RVU Switch */
void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
-void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool ena);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
@@ -1039,4 +1040,8 @@ int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
void rvu_mcs_exit(struct rvu *rvu);
+/* Representor APIs */
+int rvu_rep_pf_init(struct rvu *rvu);
+int rvu_rep_install_mcam_rules(struct rvu *rvu);
+void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena);
#endif /* RVU_H */
@@ -1468,6 +1468,9 @@ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
struct rvu *rvu = rvu_dl->rvu;
struct rvu_switch *rswitch;
+ if (rvu->rep_mode)
+ return -EOPNOTSUPP;
+
rswitch = &rvu->rswitch;
*mode = rswitch->mode;
@@ -1481,6 +1484,9 @@ static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct rvu *rvu = rvu_dl->rvu;
struct rvu_switch *rswitch;
+ if (rvu->rep_mode)
+ return -EOPNOTSUPP;
+
rswitch = &rvu->rswitch;
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
@@ -2741,7 +2741,7 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
int schq;
u64 cfg;
- if (!is_pf_cgxmapped(rvu, pf))
+ if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc))
return;
cfg = enable ? (BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0;
@@ -4371,8 +4371,6 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
ether_addr_copy(pfvf->default_mac, req->mac_addr);
- rvu_switch_update_rules(rvu, pcifunc);
-
return 0;
}
@@ -5163,7 +5161,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
pfvf = rvu_get_pfvf(rvu, pcifunc);
set_bit(NIXLF_INITIALIZED, &pfvf->flags);
- rvu_switch_update_rules(rvu, pcifunc);
+ rvu_switch_update_rules(rvu, pcifunc, true);
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -5191,6 +5189,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
if (err)
return err;
+ rvu_switch_update_rules(rvu, pcifunc, false);
rvu_cgx_tx_enable(rvu, pcifunc, true);
return 0;
@@ -13,6 +13,246 @@
#include "rvu.h"
#include "rvu_reg.h"
+static int rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc)
+{
+ int id;
+
+ for (id = 0; id < rvu->rep_cnt; id++)
+ if (rvu->rep2pfvf_map[id] == pcifunc)
+ return id;
+ return -ENODEV;
+}
+
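+/* Allocate a NIX TX VTAG entry that inserts a CTAG with the given TCI;
+ * the allocated entry index is returned through @vidx.
+ */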
+static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc,
+ u16 vlan_tci, int *vidx)
+{
+ struct nix_vtag_config_rsp rsp = {};
+ struct nix_vtag_config req = {};
+ u64 etype = ETH_P_8021Q;
+ int err;
+
+ /* Insert vlan tag */
+ req.hdr.pcifunc = pcifunc;
+ req.vtag_size = VTAGSIZE_T4;
+ req.cfg_type = 0; /* tx vlan cfg */
+ req.tx.cfg_vtag0 = true;
+ req.tx.vtag0 = etype << 48 | ntohs(vlan_tci);
+
+ err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
+ if (err) {
+ dev_err(rvu->dev, "Tx vlan config failed\n");
+ return err;
+ }
+ *vidx = rsp.vtag0_idx;
+ return 0;
+}
+
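+/* Configure Rx VTAG0 so the steering tag is stripped and not captured */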
+static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_vtag_config req = {};
+ struct nix_vtag_config_rsp rsp;
+
+ /* config strip, capture and size */
+ req.hdr.pcifunc = pcifunc;
+ req.vtag_size = VTAGSIZE_T4;
+ req.cfg_type = 1; /* rx vlan cfg */
+ req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req.rx.strip_vtag = true;
+ req.rx.capture_vtag = false;
+
+ return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp);
+}
+
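+/* Install an Rx MCAM rule keyed on the steering VLAN over the LBK
+ * channel: with @rte set, traffic coming from the representee is
+ * forwarded to the representor PF; otherwise it is forwarded to the
+ * representee itself.
+ */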
+static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc,
+ u16 entry, bool rte)
+{
+ struct npc_install_flow_req req = {};
+ struct npc_install_flow_rsp rsp = {};
+ struct rvu_pfvf *pfvf;
+ u16 vlan_tci, rep_id;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Steer the traffic from the representee to the representor */
+ rep_id = (u16)rvu_rep_get_vlan_id(rvu, pcifunc);
+ if (rte) {
+ vlan_tci = rep_id | 0x1ull << 8;
+ req.vf = rvu->rep_pcifunc;
+ req.op = NIX_RX_ACTIONOP_UCAST;
+ req.index = rep_id;
+ } else {
+ vlan_tci = rep_id;
+ req.vf = pcifunc;
+ req.op = NIX_RX_ACTIONOP_DEFAULT;
+ }
+
+ rvu_rep_rx_vlan_cfg(rvu, req.vf);
+ req.entry = entry;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG);
+ req.vtag0_valid = true;
+ req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req.packet.vlan_etype = cpu_to_be16(ETH_P_8021Q);
+ req.mask.vlan_etype = cpu_to_be16(0xffff);
+ req.packet.vlan_tci = cpu_to_be16(vlan_tci);
+ req.mask.vlan_tci = cpu_to_be16(0xffff);
+
+ req.channel = RVU_SWITCH_LBK_CHAN;
+ req.chan_mask = 0xffff;
+ req.intf = pfvf->nix_rx_intf;
+
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
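+/* Install a Tx MCAM rule that inserts the steering VLAN and redirects
+ * the packet to the LBK channel. With @rte set the rule covers the
+ * representee's traffic; otherwise it matches the representor PF's
+ * traffic by the transmitting SQ id.
+ */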
+static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry,
+ bool rte)
+{
+ struct npc_install_flow_req req = {};
+ struct npc_install_flow_rsp rsp = {};
+ struct rvu_pfvf *pfvf;
+ int vidx, err;
+ u16 vlan_tci;
+ u8 lbkid;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc);
+ if (rte)
+ vlan_tci |= 0x1ull << 8;
+
+ err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx);
+ if (err)
+ return err;
+
+ lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ if (rte) {
+ req.vf = pcifunc;
+ } else {
+ req.vf = rvu->rep_pcifunc;
+ req.packet.sq_id = vlan_tci;
+ req.mask.sq_id = 0xffff;
+ }
+
+ req.entry = entry;
+ req.intf = pfvf->nix_tx_intf;
+ req.op = NIX_TX_ACTIONOP_UCAST_CHAN;
+ req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN;
+ req.set_cntr = 1;
+ req.vtag0_def = vidx;
+ req.vtag0_op = 1;
+ return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
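+/* Install four rules per CGX mapped PF/VF: an Rx/Tx pair for the
+ * representor direction followed by an Rx/Tx pair for the representee
+ * direction.
+ */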
+int rvu_rep_install_mcam_rules(struct rvu *rvu)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ u16 start = rswitch->start_entry;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc, entry = 0;
+ int pf, vf, numvfs;
+ int err, nixlf, i;
+ bool rep;
+
+ for (pf = 1; pf < hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+ rep = true;
+ for (i = 0; i < 2; i++) {
+ err = rvu_rep_install_rx_rule(rvu, pcifunc, start + entry, rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ err = rvu_rep_install_tx_rule(rvu, pcifunc, start + entry, rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ rep = false;
+ }
+
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL);
+ for (vf = 0; vf < numvfs; vf++) {
+ pcifunc = pf << RVU_PFVF_PF_SHIFT |
+ ((vf + 1) & RVU_PFVF_FUNC_MASK);
+ rvu_get_nix_blkaddr(rvu, pcifunc);
+
+ /* Skip installing rules if nixlf is not attached */
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (err)
+ continue;
+ rep = true;
+ for (i = 0; i < 2; i++) {
+ err = rvu_rep_install_rx_rule(rvu, pcifunc, start + entry, rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+
+ err = rvu_rep_install_tx_rule(rvu, pcifunc, start + entry, rep);
+ if (err)
+ return err;
+ rswitch->entry2pcifunc[entry++] = pcifunc;
+ rep = false;
+ }
+ }
+ }
+ return 0;
+}
+
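+/* Toggle the MCAM entries owned by @pcifunc together with its LBK
+ * link, e.g. when its NIX LF starts or stops receiving.
+ */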
+void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
+{
+ struct rvu_switch *rswitch = &rvu->rswitch;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u32 max = rswitch->used_entries;
+ int blkaddr;
+ u16 entry;
+
+ if (!rswitch->used_entries)
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ if (blkaddr < 0)
+ return;
+
+ rvu_switch_enable_lbk_link(rvu, pcifunc, ena);
+ mutex_lock(&mcam->lock);
+ for (entry = 0; entry < max; entry++) {
+ if (rswitch->entry2pcifunc[entry] == pcifunc)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+int rvu_rep_pf_init(struct rvu *rvu)
+{
+ u16 pcifunc = rvu->rep_pcifunc;
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+ rvu_switch_enable_lbk_link(rvu, pcifunc, true);
+ rvu_rep_rx_vlan_cfg(rvu, pcifunc);
+ return 0;
+}
+
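+/* Mailbox handler used by the representor PF driver to move the AF in
+ * and out of eswitch (representor) mode.
+ */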
+int rvu_mbox_handler_esw_cfg(struct rvu *rvu, struct esw_cfg_req *req,
+ struct msg_rsp *rsp)
+{
+ if (req->hdr.pcifunc != rvu->rep_pcifunc)
+ return 0;
+
+ rvu->rep_mode = req->ena;
+
+ if (req->ena)
+ rvu_switch_enable(rvu);
+ else
+ rvu_switch_disable(rvu);
+
+ return 0;
+}
+
int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
struct get_rep_cnt_rsp *rsp)
{
@@ -45,4 +285,3 @@ int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req,
}
return 0;
}
-
@@ -166,6 +166,8 @@ void rvu_switch_enable(struct rvu *rvu)
alloc_req.contig = true;
alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs;
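+ /* Representor mode installs four MCAM entries per interface (Rx and
+  * Tx rules for both directions) instead of one.
+  */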
+ if (rvu->rep_mode)
+ alloc_req.count = alloc_req.count * 4;
ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
&alloc_rsp);
if (ret) {
@@ -189,7 +191,12 @@ void rvu_switch_enable(struct rvu *rvu)
rswitch->used_entries = alloc_rsp.count;
rswitch->start_entry = alloc_rsp.entry;
- ret = rvu_switch_install_rules(rvu);
+ if (rvu->rep_mode) {
+ rvu_rep_pf_init(rvu);
+ ret = rvu_rep_install_mcam_rules(rvu);
+ } else {
+ ret = rvu_switch_install_rules(rvu);
+ }
if (ret)
goto uninstall_rules;
@@ -222,6 +229,9 @@ void rvu_switch_disable(struct rvu *rvu)
if (!rswitch->used_entries)
return;
+ if (rvu->rep_mode)
+ goto free_ents;
+
for (pf = 1; pf < hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
@@ -249,6 +259,7 @@ void rvu_switch_disable(struct rvu *rvu)
}
}
+free_ents:
uninstall_req.start = rswitch->start_entry;
uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1;
free_req.all = 1;
@@ -258,12 +269,15 @@ void rvu_switch_disable(struct rvu *rvu)
kfree(rswitch->entry2pcifunc);
}
-void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc)
+void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena)
{
struct rvu_switch *rswitch = &rvu->rswitch;
u32 max = rswitch->used_entries;
u16 entry;
+ if (rvu->rep_mode)
+ return rvu_rep_update_rules(rvu, pcifunc, ena);
+
if (!rswitch->used_entries)
return;
@@ -28,6 +28,22 @@ MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rvu_rep_id_table);
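+/* Ask the AF, over the mailbox, to enable or disable eswitch
+ * (representor) mode.
+ */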
+static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena)
+{
+ struct esw_cfg_req *req;
+
+ mutex_lock(&priv->mbox.lock);
+ req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox);
+ if (!req) {
+ mutex_unlock(&priv->mbox.lock);
+ return -ENOMEM;
+ }
+ req->ena = ena;
+ otx2_sync_mbox_msg(&priv->mbox);
+ mutex_unlock(&priv->mbox.lock);
+ return 0;
+}
+
static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct rep_dev *rep = netdev_priv(dev);
@@ -159,6 +175,7 @@ void rvu_rep_destroy(struct otx2_nic *priv)
struct rep_dev *rep;
int rep_id;
+ rvu_eswitch_config(priv, false);
rvu_rep_free_cq_rsrc(priv);
for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) {
rep = priv->reps[rep_id];
@@ -214,6 +231,7 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
if (err)
goto exit;
+ rvu_eswitch_config(priv, true);
return 0;
exit:
while (--rep_id >= 0) {
@@ -224,7 +242,7 @@ int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
return err;
}
-static int rvu_rep_rsrc_free(struct otx2_nic *priv)
+static void rvu_rep_rsrc_free(struct otx2_nic *priv)
{
struct otx2_qset *qset = &priv->qset;
int wrk;
@@ -235,13 +253,12 @@ static int rvu_rep_rsrc_free(struct otx2_nic *priv)
otx2_free_hw_resources(priv);
otx2_free_queue_mem(qset);
- return 0;
}
static int rvu_rep_rsrc_init(struct otx2_nic *priv)
{
struct otx2_qset *qset = &priv->qset;
- int err = 0;
+ int err;
err = otx2_alloc_queue_mem(priv);
if (err)
Current HW does not support an in-built switch that forwards packets between a representee and its representor. When a representor is put under a bridge and packets need to be sent to the representee, packets from the representor are sent on a HW internal loopback channel, which punts them back to the ingress packet parser. The rules this patch installs are the MCAM filters/rules that match such packets and forward them to the representee, covering the basic representor <=> representee path, similar to a Tun/TAP link between a VM and the host.
Signed-off-by: Geetha sowjanya <gakula@marvell.com>
---
 .../net/ethernet/marvell/octeontx2/af/mbox.h  |   7 +
 .../net/ethernet/marvell/octeontx2/af/rvu.h   |   7 +-
 .../marvell/octeontx2/af/rvu_devlink.c        |   6 +
 .../ethernet/marvell/octeontx2/af/rvu_nix.c   |   7 +-
 .../ethernet/marvell/octeontx2/af/rvu_rep.c   | 241 +++++++++++++++++-
 .../marvell/octeontx2/af/rvu_switch.c         |  18 +-
 .../net/ethernet/marvell/octeontx2/nic/rep.c  |  23 +-
 7 files changed, 298 insertions(+), 11 deletions(-)
Current HW, do not support in-built switch which will forward pkts between representee and representor. When representor is put under a bridge and pkts needs to be sent to representee, then pkts from representor are sent on a HW internal loopback channel, which again will be punted to ingress pkt parser. Now the rules that this patch installs are the MCAM filters/rules which will match against these pkts and forward them to representee. The rules that this patch installs are for basic representor <=> representee path similar to Tun/TAP between VM and Host. Signed-off-by: Geetha sowjanya <gakula@marvell.com> --- .../net/ethernet/marvell/octeontx2/af/mbox.h | 7 + .../net/ethernet/marvell/octeontx2/af/rvu.h | 7 +- .../marvell/octeontx2/af/rvu_devlink.c | 6 + .../ethernet/marvell/octeontx2/af/rvu_nix.c | 7 +- .../ethernet/marvell/octeontx2/af/rvu_rep.c | 241 +++++++++++++++++- .../marvell/octeontx2/af/rvu_switch.c | 18 +- .../net/ethernet/marvell/octeontx2/nic/rep.c | 23 +- 7 files changed, 298 insertions(+), 11 deletions(-)