Commit 8d7e5dee authored by Guillaume Nault, committed by David S. Miller

netns: don't disable BHs when locking "nsid_lock"

When peernet2id() had to lock "nsid_lock" before iterating through the
nsid table, we had to disable BHs, because VXLAN can call peernet2id()
from the xmit path:
  vxlan_xmit() -> vxlan_fdb_miss() -> vxlan_fdb_notify()
    -> __vxlan_fdb_notify() -> vxlan_fdb_info() -> peernet2id().
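
The constraint is the usual one for a lock shared with softirq context:
if the xmit path (running in BH context) could contend for "nsid_lock"
while process context on the same CPU held it, the softirq would spin
forever on a lock that can't be released until it returns. A minimal
sketch of the old pattern (illustrative comments, not the exact kernel
code):

  spin_lock_bh(&net->nsid_lock);	/* BHs off: VXLAN xmit may also
					 * take this lock, so letting a
					 * softirq in here could deadlock
					 * this CPU */
  id = __peernet2id(net, peer);
  spin_unlock_bh(&net->nsid_lock);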

Now that peernet2id() uses RCU protection, "nsid_lock" isn't used in BH
context anymore. Therefore, we can safely use plain
spin_lock()/spin_unlock() and let BHs run when holding "nsid_lock".
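
For reference, the RCU read side added by the parent commit (2dce224f)
looks roughly like the sketch below; it approximates the kernel code
rather than quoting it:

  int peernet2id(struct net *net, struct net *peer)
  {
  	int id;

  	/* The id lookup no longer takes "nsid_lock" at all: the
  	 * table is walked under rcu_read_lock(), so the BH-context
  	 * caller (VXLAN xmit) never touches the spinlock and plain
  	 * spin_lock() becomes safe for the writers below.
  	 */
  	rcu_read_lock();
  	id = __peernet2id(net, peer);
  	rcu_read_unlock();

  	return id;
  }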

Signed-off-by: Guillaume Nault <gnault@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2dce224f
net/core/net_namespace.c (+11 −11)
@@ -237,10 +237,10 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
 	if (refcount_read(&net->count) == 0)
 		return NETNSA_NSID_NOT_ASSIGNED;
 
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock(&net->nsid_lock);
 	id = __peernet2id(net, peer);
 	if (id >= 0) {
-		spin_unlock_bh(&net->nsid_lock);
+		spin_unlock(&net->nsid_lock);
 		return id;
 	}
 
@@ -250,12 +250,12 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
 	 * just been idr_remove()'d from there in cleanup_net().
 	 */
 	if (!maybe_get_net(peer)) {
-		spin_unlock_bh(&net->nsid_lock);
+		spin_unlock(&net->nsid_lock);
 		return NETNSA_NSID_NOT_ASSIGNED;
 	}
 
 	id = alloc_netid(net, peer, -1);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock(&net->nsid_lock);
 
 	put_net(peer);
 	if (id < 0)
@@ -520,20 +520,20 @@ static void unhash_nsid(struct net *net, struct net *last)
 	for_each_net(tmp) {
 		int id;
 
-		spin_lock_bh(&tmp->nsid_lock);
+		spin_lock(&tmp->nsid_lock);
 		id = __peernet2id(tmp, net);
 		if (id >= 0)
 			idr_remove(&tmp->netns_ids, id);
-		spin_unlock_bh(&tmp->nsid_lock);
+		spin_unlock(&tmp->nsid_lock);
 		if (id >= 0)
 			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
 					  GFP_KERNEL);
 		if (tmp == last)
 			break;
 	}
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock(&net->nsid_lock);
 	idr_destroy(&net->netns_ids);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock(&net->nsid_lock);
 }
 
 static LLIST_HEAD(cleanup_list);
@@ -746,9 +746,9 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
 		return PTR_ERR(peer);
 	}
 
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock(&net->nsid_lock);
 	if (__peernet2id(net, peer) >= 0) {
-		spin_unlock_bh(&net->nsid_lock);
+		spin_unlock(&net->nsid_lock);
 		err = -EEXIST;
 		NL_SET_BAD_ATTR(extack, nla);
 		NL_SET_ERR_MSG(extack,
@@ -757,7 +757,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
 	}
 
 	err = alloc_netid(net, peer, nsid);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock(&net->nsid_lock);
 	if (err >= 0) {
 		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
 				  nlh, GFP_KERNEL);