@@ -69,6 +69,16 @@
#define IB_PHYS_STATE_LINK_DOWN (3)
#define RXE_ROCE_V2_SPORT (0xc000)
+#ifdef CONFIG_RDMA_RXE_DUAL_PORT_MODULE
+/*
+ * Fallback port pair used when transmits on the default pair fail
+ * (see rxe_send()). ROCE_V2_UDP_DUAL_DPORT is not defined in the
+ * upstream headers, so its value here is an assumption standing in
+ * for the definition this series presumably adds elsewhere.
+ */
+#define RXE_ROCE_V2_DUAL_SPORT (0xd000)
+#define ROCE_V2_UDP_DUAL_DPORT (4792)
+#endif
static inline u32 rxe_crc32(struct rxe_dev *rxe,
u32 crc, void *next, size_t len)
@@ -47,6 +47,18 @@
static LIST_HEAD(rxe_dev_list);
static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */
+#ifdef CONFIG_RDMA_RXE_DUAL_PORT_MODULE
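+/*
+ * Module-wide UDP port pair stamped on every outgoing RoCEv2 packet
+ * (CONFIG_RDMA_RXE_DUAL_PORT_MODULE is presumably added to Kconfig
+ * elsewhere in this series). Both the receive path and the transmit
+ * error path update the pair, so port_lock serialises the paired
+ * dst_port/src_port update.
+ */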
+static DEFINE_SPINLOCK(port_lock); /* spinlock for port change */
+static __be16 dst_port = htons(ROCE_V2_UDP_DPORT);
+static __be16 src_port = htons(RXE_ROCE_V2_SPORT);
+#endif
struct rxe_dev *net_to_rxe(struct net_device *ndev)
{
@@ -249,6 +254,40 @@ static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
return dst;
}
+#ifdef CONFIG_RDMA_RXE_DUAL_PORT_MODULE
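+/* Forward declaration: prepare_udp_hdr() is defined later in this file. */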
+static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
+ __be16 dst_port);
+
+/*
+ * Snapshot the current port pair under the lock so a header is never
+ * built from a torn src/dst combination.
+ */
+static inline void prepare_udp_hdr_spin(struct sk_buff *skb)
+{
+ __be16 dport, sport;
+
+ spin_lock_bh(&port_lock);
+ dport = dst_port;
+ sport = src_port;
+ spin_unlock_bh(&port_lock);
+ prepare_udp_hdr(skb, sport, dport);
+}
+
+/*
+ * Switch the module-wide port pair. The _bh variants are used because
+ * port_lock is also taken from the UDP encap receive path (softirq)
+ * while the send path may run in process context.
+ */
+static inline void prepare_udp_ports_spin(__be16 dport, __be16 sport)
+{
+ spin_lock_bh(&port_lock);
+ dst_port = dport;
+ src_port = sport;
+ spin_unlock_bh(&port_lock);
+}
+#endif
+
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct udphdr *udph;
@@ -276,6 +305,17 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
pkt->mask = RXE_GRH_MASK;
pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
+#ifdef CONFIG_RDMA_RXE_DUAL_PORT_MODULE
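+ /*
+  * Track the port pair the peer is actually using: if a packet
+  * arrives on an unexpected destination port, adopt its dst/src
+  * pair for subsequent transmits. The unlocked READ_ONCE() is
+  * only a hint; the update itself is made under port_lock.
+  */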
+ if (unlikely(udph->dest != READ_ONCE(dst_port)))
+ prepare_udp_ports_spin(udph->dest, udph->source);
+#endif
+
return rxe_rcv(skb);
drop:
kfree_skb(skb);
@@ -408,10 +442,13 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
if (!memcmp(saddr, daddr, sizeof(*daddr)))
pkt->mask |= RXE_LOOPBACK_MASK;
-
+#ifdef CONFIG_RDMA_RXE_DUAL_PORT_MODULE
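+ /* Use the current (possibly switched) port pair for this packet. */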
+ prepare_udp_hdr_spin(skb);
+#else
prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
htons(ROCE_V2_UDP_DPORT));
-
+#endif
prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
av->grh.traffic_class, av->grh.hop_limit, df, xnet);
@@ -439,10 +475,12 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
if (!memcmp(saddr, daddr, sizeof(*daddr)))
pkt->mask |= RXE_LOOPBACK_MASK;
-
+#ifdef CONFIG_RDMA_RXE_DUAL_PORT_MODULE
+ prepare_udp_hdr_spin(skb);
+#else
prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
htons(ROCE_V2_UDP_DPORT));
-
+#endif
prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
av->grh.traffic_class,
av->grh.hop_limit);
@@ -509,6 +547,16 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
return -EINVAL;
}
+#ifdef CONFIG_RDMA_RXE_DUAL_PORT_MODULE
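+ /*
+  * If the transmit was not accepted, fall back to the alternate
+  * port pair so later attempts use it.
+  */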
+ if (unlikely(err != NET_XMIT_SUCCESS))
+ prepare_udp_ports_spin(htons(ROCE_V2_UDP_DUAL_DPORT),
+ htons(RXE_ROCE_V2_DUAL_SPORT));
+#endif
+
if (unlikely(net_xmit_eval(err))) {
pr_debug("error sending packet: %d\n", err);
return -EAGAIN;