
[v4,net-next,14/18] net: openvswitch: ensure dev_get_stats can sleep

Message ID 20210108002005.3429956-15-olteanv@gmail.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series Make .ndo_get_stats64 sleepable

Checks

Context Check Description
netdev/cover_letter success Link
netdev/fixes_present success Link
netdev/patch_count fail Series longer than 15 patches
netdev/tree_selection success Clearly marked for net-next
netdev/subject_prefix success Link
netdev/cc_maintainers warning 2 maintainers not CCed: pshelar@ovn.org dev@openvswitch.org
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Link
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success Link
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 133 lines checked
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/header_inline success Link
netdev/stable success Stable not CCed

Commit Message

Vladimir Oltean Jan. 8, 2021, 12:20 a.m. UTC
From: Vladimir Oltean <vladimir.oltean@nxp.com>

There is an effort to convert .ndo_get_stats64 to sleepable context, and
for that to work, we need to prevent callers of dev_get_stats from using
atomic locking.
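
To illustrate the end goal (a sketch only, not part of this patch; the
driver name, its private structure and its lock are made up), a driver
would eventually be able to sleep in its .ndo_get_stats64 callback:

	/* Hypothetical driver: reads counters over a slow bus, so it
	 * needs to sleep. This is only correct once every
	 * dev_get_stats() caller runs in process context.
	 */
	static void foo_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
	{
		struct foo_priv *priv = netdev_priv(dev);

		mutex_lock(&priv->stats_lock);	/* may sleep */
		stats->rx_packets = priv->rx_packets;
		stats->tx_packets = priv->tx_packets;
		mutex_unlock(&priv->stats_lock);
	}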

The OVS vport driver calls ovs_vport_get_stats from
ovs_vport_cmd_fill_info, a function with 7 callers: 5 under ovs_lock() and
2 under rcu_read_lock(). The RCU-protected callers are the doit and
dumpit callbacks of the OVS_VPORT_CMD_GET genetlink command. Things have
been this way ever since OVS was introduced in commit ccb1352e76cf
("net: Add Open vSwitch kernel components."), probably so that
OVS_VPORT_CMD_GET doesn't have to serialize with all the other commands
through ovs_mutex. Sadly, now they do have to, otherwise we don't have
protection while accessing the datapath and vport structures.
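
For reference, the bottom of that call chain is ovs_vport_get_stats(),
which boils down to roughly the following (paraphrased, most counter
copies omitted); with the RCU-protected callers, the dev_get_stats()
call ends up running in atomic context:

	/* Paraphrased, abbreviated sketch of the existing helper. */
	int ovs_vport_get_stats(struct vport *vport,
				struct ovs_vport_stats *stats)
	{
		const struct rtnl_link_stats64 *dev_stats;
		struct rtnl_link_stats64 temp;

		/* Must only be reached from sleepable context. */
		dev_stats = dev_get_stats(vport->dev, &temp);

		stats->rx_errors = dev_stats->rx_errors;
		stats->tx_errors = dev_stats->tx_errors;
		/* ... remaining counters copied the same way ... */

		return 0;
	}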

Convert all callers of ovs_vport_cmd_fill_info to assume ovs_mutex
protection. This means that we can get rid of the gfp argument: since
all callers are now sleepable, we can just use GFP_KERNEL for memory
allocation.

Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
---
Changes in v4:
Patch is new.

 net/openvswitch/datapath.c | 38 ++++++++++++++++++--------------------
 net/openvswitch/vport.c    |  2 +-
 2 files changed, 19 insertions(+), 21 deletions(-)

Patch

diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 5c60a23d0c27..bf22ac608487 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1957,10 +1957,10 @@  static struct genl_family dp_datapath_genl_family __ro_after_init = {
 	.module = THIS_MODULE,
 };
 
-/* Called with ovs_mutex or RCU read lock. */
+/* Called with ovs_mutex */
 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 				   struct net *net, u32 portid, u32 seq,
-				   u32 flags, u8 cmd, gfp_t gfp)
+				   u32 flags, u8 cmd)
 {
 	struct ovs_header *ovs_header;
 	struct ovs_vport_stats vport_stats;
@@ -1981,7 +1981,7 @@  static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 		goto nla_put_failure;
 
 	if (!net_eq(net, dev_net(vport->dev))) {
-		int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
+		int id = peernet2id_alloc(net, dev_net(vport->dev), GFP_KERNEL);
 
 		if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
 			goto nla_put_failure;
@@ -2029,15 +2029,14 @@  struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
-					 GFP_KERNEL);
+	retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
 	if (retval)
 		return ERR_PTR(retval);
 
 	return skb;
 }
 
-/* Called with ovs_mutex or RCU read lock. */
+/* Called with ovs_mutex */
 static struct vport *lookup_vport(struct net *net,
 				  const struct ovs_header *ovs_header,
 				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
@@ -2176,7 +2175,7 @@  static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
 
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_NEW, GFP_KERNEL);
+				      OVS_VPORT_CMD_NEW);
 	if (err)
 		goto exit_unlock_free;
 
@@ -2238,7 +2237,7 @@  static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_SET, GFP_KERNEL);
+				      OVS_VPORT_CMD_SET);
 	if (err)
 		goto exit_unlock_free;
 
@@ -2279,7 +2278,7 @@  static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_DEL, GFP_KERNEL);
+				      OVS_VPORT_CMD_DEL);
 	if (err)
 		goto exit_unlock_free;
 
@@ -2320,22 +2319,22 @@  static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	if (!reply)
 		return -ENOMEM;
 
-	rcu_read_lock();
+	ovs_lock();
 	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
 	err = PTR_ERR(vport);
 	if (IS_ERR(vport))
 		goto exit_unlock_free;
 	err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
 				      info->snd_portid, info->snd_seq, 0,
-				      OVS_VPORT_CMD_GET, GFP_ATOMIC);
+				      OVS_VPORT_CMD_GET);
 	if (err)
 		goto exit_unlock_free;
-	rcu_read_unlock();
+	ovs_unlock();
 
 	return genlmsg_reply(reply, info);
 
 exit_unlock_free:
-	rcu_read_unlock();
+	ovs_unlock();
 	kfree_skb(reply);
 	return err;
 }
@@ -2347,25 +2346,24 @@  static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	int bucket = cb->args[0], skip = cb->args[1];
 	int i, j = 0;
 
-	rcu_read_lock();
-	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
+	ovs_lock();
+	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
 	if (!dp) {
-		rcu_read_unlock();
+		ovs_unlock();
 		return -ENODEV;
 	}
 	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
 		struct vport *vport;
 
 		j = 0;
-		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
+		hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
 			if (j >= skip &&
 			    ovs_vport_cmd_fill_info(vport, skb,
 						    sock_net(skb->sk),
 						    NETLINK_CB(cb->skb).portid,
 						    cb->nlh->nlmsg_seq,
 						    NLM_F_MULTI,
-						    OVS_VPORT_CMD_GET,
-						    GFP_ATOMIC) < 0)
+						    OVS_VPORT_CMD_GET) < 0)
 				goto out;
 
 			j++;
@@ -2373,7 +2371,7 @@  static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		skip = 0;
 	}
 out:
-	rcu_read_unlock();
+	ovs_unlock();
 
 	cb->args[0] = i;
 	cb->args[1] = j;
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index e66c949fd97a..ba1a52addff2 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -265,7 +265,7 @@  void ovs_vport_del(struct vport *vport)
  *
  * Retrieves transmit, receive, and error stats for the given device.
  *
- * Must be called with ovs_mutex or rcu_read_lock.
+ * Must be called with ovs_mutex.
  */
 int ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
 {