--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1746,6 +1746,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
++vxlan->dev->stats.rx_frame_errors;
++vxlan->dev->stats.rx_errors;
+ vxlan_vnifilter_count(vxlan, vni, VXLAN_VNI_STATS_RX_ERRORS, 0);
goto drop;
}
@@ -1754,10 +1755,12 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
rcu_read_unlock();
atomic_long_inc(&vxlan->dev->rx_dropped);
+ vxlan_vnifilter_count(vxlan, vni, VXLAN_VNI_STATS_RX_DROPS, 0);
goto drop;
}
dev_sw_netstats_rx_add(vxlan->dev, skb->len);
+ vxlan_vnifilter_count(vxlan, vni, VXLAN_VNI_STATS_RX, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
rcu_read_unlock();
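
These two receive-path hunks set the pattern for the whole patch: wherever vxlan_core already bumps a per-netdev counter (rx_frame_errors/rx_errors on a failed ECN decapsulation, rx_dropped when the device is down, the sw_netstats byte/packet counters on success), a matching per-VNI counter is bumped right next to it via vxlan_vnifilter_count(). If these pairs keep multiplying, they could be folded into small wrappers; a hypothetical sketch, not part of the patch:

/* Hypothetical wrapper (not in the patch): keep the per-netdev and
 * per-VNI drop accounting in lockstep at every rx drop site.
 */
static inline void vxlan_count_rx_dropped(struct vxlan_dev *vxlan, __be32 vni)
{
	atomic_long_inc(&vxlan->dev->rx_dropped);
	vxlan_vnifilter_count(vxlan, vni, VXLAN_VNI_STATS_RX_DROPS, 0);
}
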
@@ -1865,8 +1868,11 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;
- if (netif_rx(reply) == NET_RX_DROP)
+ if (netif_rx(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
+ }
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin.sin_addr.s_addr = tip,
@@ -2020,9 +2027,11 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
if (reply == NULL)
goto out;
- if (netif_rx(reply) == NET_RX_DROP)
+ if (netif_rx(reply) == NET_RX_DROP) {
dev->stats.rx_dropped++;
-
+ vxlan_vnifilter_count(vxlan, vni,
+ VXLAN_VNI_STATS_RX_DROPS, 0);
+ }
} else if (vxlan->cfg.flags & VXLAN_F_L3MISS) {
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
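
The proxy paths get the same treatment. netif_rx() queues the locally generated ARP (or IPv6 neighbour discovery) reply back into the stack and returns NET_RX_DROP if the backlog is full; because the drop handling grows from one statement to two, the if body now needs braces in both arp_reduce() and neigh_reduce().
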
@@ -2356,15 +2365,19 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
tx_stats->tx_packets++;
tx_stats->tx_bytes += len;
u64_stats_update_end(&tx_stats->syncp);
+ vxlan_vnifilter_count(src_vxlan, vni, VXLAN_VNI_STATS_TX, len);
if (__netif_rx(skb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += len;
u64_stats_update_end(&rx_stats->syncp);
+ vxlan_vnifilter_count(dst_vxlan, vni, VXLAN_VNI_STATS_RX, len);
} else {
drop:
dev->stats.rx_dropped++;
+ vxlan_vnifilter_count(dst_vxlan, vni, VXLAN_VNI_STATS_RX_DROPS,
+ 0);
}
rcu_read_unlock();
}
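
vxlan_encap_bypass() short-circuits traffic between two local VXLAN devices, so one packet is deliberately accounted twice: as TX against the source device's VNI and, if __netif_rx() accepts it, as RX against the destination device's VNI (or as an RX drop on the destination otherwise). Note that the existing per-cpu tx_stats/rx_stats updates sit inside u64_stats_update_begin()/end() pairs, while the per-VNI increment happens in vxlan_vnifilter_count() under its own percpu syncp, shown further down.
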
@@ -2394,6 +2407,8 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
vxlan->cfg.flags);
if (!dst_vxlan) {
dev->stats.tx_errors++;
+ vxlan_vnifilter_count(vxlan, vni,
+ VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
return -ENOENT;
@@ -2417,6 +2432,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
union vxlan_addr remote_ip, local_ip;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
+ unsigned int pkt_len = skb->len;
__be16 src_port = 0, dst_port;
struct dst_entry *ndst = NULL;
__u8 tos, ttl;
@@ -2644,12 +2660,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
label, src_port, dst_port, !udp_sum);
#endif
}
+ vxlan_vnifilter_count(vxlan, vni, VXLAN_VNI_STATS_TX, pkt_len);
out_unlock:
rcu_read_unlock();
return;
drop:
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni, VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return;
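
vxlan_xmit_one() is why pkt_len is captured at the top of the function: by the time the success path reaches the new VXLAN_VNI_STATS_TX update, the skb has already been handed to udp_tunnel_xmit_skb()/udp_tunnel6_xmit_skb() and may be freed, and its length would in any case have grown by the outer headers. Sampling skb->len on entry counts the same inner-packet length that the device-level sw_netstats use.
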
@@ -2661,6 +2679,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dev->stats.tx_carrier_errors++;
dst_release(ndst);
dev->stats.tx_errors++;
+ vxlan_vnifilter_count(vxlan, vni, VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
}
@@ -2693,6 +2712,8 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
drop:
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(netdev_priv(dev), vni,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
}
@@ -2767,6 +2788,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_fdb_miss(vxlan, eth->h_dest);
dev->stats.tx_dropped++;
+ vxlan_vnifilter_count(vxlan, vni,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb(skb);
return NETDEV_TX_OK;
}
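
That covers every datapath site that touches the existing per-netdev counters: receive, proxy replies, local bypass, and the transmit path including its nexthop variant (vxlan_xmit_nh()) and FDB-miss drops. The rest of the patch adds the machinery those calls rely on, starting with the prototype in the driver's private header.
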
--- a/drivers/net/vxlan/vxlan_private.h
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -154,6 +154,8 @@ void vxlan_vnigroup_uninit(struct vxlan_dev *vxlan);
void vxlan_vnifilter_init(void);
void vxlan_vnifilter_uninit(void);
+void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
+ int type, unsigned int len);
void vxlan_vs_add_vnigrp(struct vxlan_dev *vxlan,
struct vxlan_sock *vs,
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -97,6 +97,80 @@ void vxlan_vs_del_vnigrp(struct vxlan_dev *vxlan)
spin_unlock(&vn->sock_lock);
}
+static void vxlan_vnifilter_stats_get(const struct vxlan_vni_node *vninode,
+ struct vxlan_vni_stats *dest)
+{
+ int i;
+
+ memset(dest, 0, sizeof(*dest));
+ for_each_possible_cpu(i) {
+ struct vxlan_vni_stats_pcpu *pstats;
+ struct vxlan_vni_stats temp;
+ unsigned int start;
+
+ pstats = per_cpu_ptr(vninode->stats, i);
+ do {
+ start = u64_stats_fetch_begin_irq(&pstats->syncp);
+ memcpy(&temp, &pstats->stats, sizeof(temp));
+ } while (u64_stats_fetch_retry_irq(&pstats->syncp, start));
+
+ dest->rx_packets += temp.rx_packets;
+ dest->rx_bytes += temp.rx_bytes;
+ dest->rx_drops += temp.rx_drops;
+ dest->rx_errors += temp.rx_errors;
+ dest->tx_packets += temp.tx_packets;
+ dest->tx_bytes += temp.tx_bytes;
+ dest->tx_drops += temp.tx_drops;
+ dest->tx_errors += temp.tx_errors;
+ }
+}
+
+static void vxlan_vnifilter_stats_add(struct vxlan_vni_node *vninode,
+ int type, unsigned int len)
+{
+ struct vxlan_vni_stats_pcpu *pstats = this_cpu_ptr(vninode->stats);
+
+ u64_stats_update_begin(&pstats->syncp);
+ switch (type) {
+ case VXLAN_VNI_STATS_RX:
+ pstats->stats.rx_bytes += len;
+ pstats->stats.rx_packets++;
+ break;
+ case VXLAN_VNI_STATS_RX_DROPS:
+ pstats->stats.rx_drops++;
+ break;
+ case VXLAN_VNI_STATS_RX_ERRORS:
+ pstats->stats.rx_errors++;
+ break;
+ case VXLAN_VNI_STATS_TX:
+ pstats->stats.tx_bytes += len;
+ pstats->stats.tx_packets++;
+ break;
+ case VXLAN_VNI_STATS_TX_DROPS:
+ pstats->stats.tx_drops++;
+ break;
+ case VXLAN_VNI_STATS_TX_ERRORS:
+ pstats->stats.tx_errors++;
+ break;
+ }
+ u64_stats_update_end(&pstats->syncp);
+}
+
+void vxlan_vnifilter_count(struct vxlan_dev *vxlan, __be32 vni,
+ int type, unsigned int len)
+{
+ struct vxlan_vni_node *vninode;
+
+ if (!(vxlan->cfg.flags & VXLAN_F_VNIFILTER))
+ return;
+
+ vninode = vxlan_vnifilter_lookup(vxlan, vni);
+ if (!vninode)
+ return;
+
+ vxlan_vnifilter_stats_add(vninode, type, len);
+}
+
static u32 vnirange(struct vxlan_vni_node *vbegin,
struct vxlan_vni_node *vend)
{
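
Three new functions in vxlan_vnifilter.c carry the mechanics. vxlan_vnifilter_stats_add() is the writer: it increments the current CPU's copy inside a u64_stats_update_begin()/end() section, which is what makes the 64-bit counters tearing-safe on 32-bit SMP. vxlan_vnifilter_stats_get() is the reader: for each possible CPU it snapshots the whole stats struct inside a u64_stats_fetch_begin_irq()/retry loop and accumulates into dest only outside the loop, so a retry never double-counts. vxlan_vnifilter_count() itself is a cheap no-op unless the device runs in VNI-filtering mode (VXLAN_F_VNIFILTER) and the VNI is actually present in the filter. A dump path can then fold the aggregate into the usual stats layout; a sketch, assuming it lives next to vxlan_vnifilter_stats_get() (the helper and its rtnl mapping are illustrative, not part of the patch):

/* Illustrative only: translate the aggregated per-VNI totals into the
 * familiar rtnl_link_stats64 layout for reporting.
 */
static void vxlan_vni_stats_to_rtnl(const struct vxlan_vni_node *vninode,
				    struct rtnl_link_stats64 *out)
{
	struct vxlan_vni_stats totals;

	vxlan_vnifilter_stats_get(vninode, &totals);
	out->rx_packets = totals.rx_packets;
	out->rx_bytes   = totals.rx_bytes;
	out->rx_dropped = totals.rx_drops;
	out->rx_errors  = totals.rx_errors;
	out->tx_packets = totals.tx_packets;
	out->tx_bytes   = totals.tx_bytes;
	out->tx_dropped = totals.tx_drops;
	out->tx_errors  = totals.tx_errors;
}
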
@@ -539,6 +613,11 @@ static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan,
vninode = kzalloc(sizeof(*vninode), GFP_ATOMIC);
if (!vninode)
return NULL;
+ vninode->stats = netdev_alloc_pcpu_stats(struct vxlan_vni_stats_pcpu);
+ if (!vninode->stats) {
+ kfree(vninode);
+ return NULL;
+ }
vninode->vni = vni;
vninode->hlist4.vxlan = vxlan;
#if IS_ENABLED(CONFIG_IPV6)
@@ -596,6 +675,7 @@ static void vxlan_vni_node_rcu_free(struct rcu_head *rcu)
struct vxlan_vni_node *v;
v = container_of(rcu, struct vxlan_vni_node, rcu);
+ free_percpu(v->stats);
kfree(v);
}
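
Allocation and teardown bracket the datapath. The percpu area comes from netdev_alloc_pcpu_stats(), which zeroes every CPU's copy and initializes its syncp; if it fails, the half-built node is freed before returning. On removal, free_percpu() is deferred to the RCU callback: datapath lookups run under rcu_read_lock() (vxlan_rcv() holds it across the receive path), so a counter update racing with VNI deletion still writes into memory that stays valid until the grace period ends. Schematically, the read side being protected looks like this (example_count_rx is illustrative, not part of the patch):

/* Illustrative read side: the vninode (and its percpu stats) obtained
 * under RCU stay valid for the whole critical section, even if the VNI
 * is concurrently removed from the filter.
 */
static void example_count_rx(struct vxlan_dev *vxlan, __be32 vni,
			     unsigned int len)
{
	struct vxlan_vni_node *vninode;

	rcu_read_lock();
	vninode = vxlan_vnifilter_lookup(vxlan, vni);
	if (vninode)
		vxlan_vnifilter_stats_add(vninode, VXLAN_VNI_STATS_RX, len);
	rcu_read_unlock();
}
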
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -227,6 +227,31 @@ struct vxlan_config {
enum ifla_vxlan_df df;
};
+enum {
+ VXLAN_VNI_STATS_RX,
+ VXLAN_VNI_STATS_RX_DROPS,
+ VXLAN_VNI_STATS_RX_ERRORS,
+ VXLAN_VNI_STATS_TX,
+ VXLAN_VNI_STATS_TX_DROPS,
+ VXLAN_VNI_STATS_TX_ERRORS,
+};
+
+struct vxlan_vni_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_drops;
+ u64 rx_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+ u64 tx_errors;
+};
+
+struct vxlan_vni_stats_pcpu {
+ struct vxlan_vni_stats stats;
+ struct u64_stats_sync syncp;
+};
+
struct vxlan_dev_node {
struct hlist_node hlist;
struct vxlan_dev *vxlan;
@@ -241,6 +266,7 @@ struct vxlan_vni_node {
struct list_head vlist;
__be32 vni;
union vxlan_addr remote_ip; /* default remote ip for this vni */
+ struct vxlan_vni_stats_pcpu __percpu *stats;
struct rcu_head rcu;
};
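
Finally, the shared header grows the pieces everything above refers to: the VXLAN_VNI_STATS_* selector enum, the plain 64-bit counter block, the percpu wrapper pairing it with a u64_stats_sync, and the __percpu pointer hung off each vxlan_vni_node. The enum and structs are kernel-internal, not uapi; userspace sees the counters only through the vnifilter netlink dump (with a sufficiently recent iproute2, something along the lines of `bridge -s vni show dev vxlan0`, depending on your iproute2 version).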