Commit e654f9f5 authored by Jon Maloy, committed by David S. Miller

tipc: clean up skb list lock handling on send path



The policy for handling the skb list locks on the send and receive paths
is simple (see the initializer sketch after this list).

- On the send path we never need to grab the lock on the 'xmitq' list
  when the destination is an external node.

- On the receive path we always need to grab the lock on the 'inputq'
  list, irrespective of source node.
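
For reference, the distinction comes straight from the two list
initializers in include/linux/skbuff.h; only the locked variant sets up
the spinlock. Quoted here as a sketch (the exact layout may differ
between kernel versions):

  /* Lockless variant: sufficient for a queue that is private to one
   * context, such as an on-stack 'xmitq' on the send path.
   */
  static inline void __skb_queue_head_init(struct sk_buff_head *list)
  {
  	list->prev = list->next = (struct sk_buff *)list;
  	list->qlen = 0;
  }

  /* Locked variant: required for a queue whose lock will actually be
   * taken, i.e. any 'inputq' on the receive path.
   */
  static inline void skb_queue_head_init(struct sk_buff_head *list)
  {
  	spin_lock_init(&list->lock);
  	__skb_queue_head_init(list);
  }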

However, when transmitting node local messages those will eventually
end up on the receive path of a local socket, meaning that the argument
'xmitq' in tipc_node_xmit() will become the 'inputq' argument in the
function tipc_sk_rcv(). This has so far been handled by always
initializing the spinlock of the 'xmitq' list at message creation, just
in case it may end up on the receive path later, even though in most
cases the lock will never be used.
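
Concretely, the handover happens in the local-delivery branch of
tipc_node_xmit(); a simplified sketch (the full hunk is in the node.c
diff below):

  if (in_own_node(net, dnode)) {
  	tipc_loopback_trace(net, list);
  	/* 'list' was built as an xmitq, but from here on it is
  	 * consumed as an inputq, so its spinlock must be valid.
  	 */
  	tipc_sk_rcv(net, list);
  	return 0;
  }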

This approach is inaccurate and confusing, and has also concealed the
fact that the stated 'no lock grabbing' policy for the send path is
violated in some cases.

We now clean this up by never initializing the lock at message
creation, and instead doing so at the moment we find that the message
actually will enter the receive path. At the same time we fix the four
locations where we incorrectly access the spinlock on the send/error
path.
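
The incorrect accesses were calls to skb_queue_purge(), which takes
list->lock, on queues whose lock is now never initialized; the error
paths switch to the lockless variant, as in this sketch of the
resulting pattern in tipc_node_xmit():

  n = tipc_node_find(net, dnode);
  if (unlikely(!n)) {
  	/* Send-path queue, private to this context: purge it
  	 * without touching the uninitialized spinlock.
  	 */
  	__skb_queue_purge(list);	/* was: skb_queue_purge(list) */
  	return -EHOSTUNREACH;
  }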

This patch also reverts commit d12cffe9 ("tipc: ensure head->lock
is initialised") which has now become redundant.

CC: Eric Dumazet <edumazet@google.com>
Reported-by: Chris Packham <chris.packham@alliedtelesis.co.nz>
Acked-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Reviewed-by: Xin Long <lucien.xin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 10086b34
net/tipc/bcast.c (+5 −5)
@@ -185,7 +185,7 @@ static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
 	}
 
 	/* We have to transmit across all bearers */
-	skb_queue_head_init(&_xmitq);
+	__skb_queue_head_init(&_xmitq);
 	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
 		if (!bb->dests[bearer_id])
 			continue;
@@ -256,7 +256,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
 	struct sk_buff_head xmitq;
 	int rc = 0;
 
-	skb_queue_head_init(&xmitq);
+	__skb_queue_head_init(&xmitq);
 	tipc_bcast_lock(net);
 	if (tipc_link_bc_peers(l))
 		rc = tipc_link_xmit(l, pkts, &xmitq);
@@ -286,7 +286,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
 	u32 dnode, selector;
 
 	selector = msg_link_selector(buf_msg(skb_peek(pkts)));
-	skb_queue_head_init(&_pkts);
+	__skb_queue_head_init(&_pkts);
 
 	list_for_each_entry_safe(dst, tmp, &dests->list, list) {
 		dnode = dst->node;
@@ -344,7 +344,7 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
 	msg_set_size(_hdr, MCAST_H_SIZE);
 	msg_set_is_rcast(_hdr, !msg_is_rcast(hdr));
 
-	skb_queue_head_init(&tmpq);
+	__skb_queue_head_init(&tmpq);
 	__skb_queue_tail(&tmpq, _skb);
 	if (method->rcast)
 		tipc_bcast_xmit(net, &tmpq, cong_link_cnt);
@@ -378,7 +378,7 @@ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
 	int rc = 0;
 
 	skb_queue_head_init(&inputq);
-	skb_queue_head_init(&localq);
+	__skb_queue_head_init(&localq);
 
 	/* Clone packets before they are consumed by next call */
 	if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
net/tipc/group.c (+2 −2)
@@ -199,7 +199,7 @@ void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)
 	struct tipc_member *m, *tmp;
 	struct sk_buff_head xmitq;
 
-	skb_queue_head_init(&xmitq);
+	__skb_queue_head_init(&xmitq);
 	rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
 		tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, &xmitq);
 		tipc_group_update_member(m, 0);
@@ -435,7 +435,7 @@ bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
 		return true;
 	if (state == MBR_PENDING && adv == ADV_IDLE)
 		return true;
-	skb_queue_head_init(&xmitq);
+	__skb_queue_head_init(&xmitq);
 	tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, &xmitq);
 	tipc_node_distr_xmit(grp->net, &xmitq);
 	return true;
net/tipc/link.c (+7 −7)
@@ -959,7 +959,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
 			skb_queue_len(list), msg_user(hdr),
 			msg_type(hdr), msg_size(hdr), mtu);
-		skb_queue_purge(list);
+		__skb_queue_purge(list);
 		return -EMSGSIZE;
 	}
 
@@ -988,7 +988,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 		if (likely(skb_queue_len(transmq) < maxwin)) {
 			_skb = skb_clone(skb, GFP_ATOMIC);
 			if (!_skb) {
-				skb_queue_purge(list);
+				__skb_queue_purge(list);
 				return -ENOBUFS;
 			}
 			__skb_dequeue(list);
@@ -1668,7 +1668,7 @@ void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
 	struct sk_buff *skb;
 	u32 dnode = l->addr;
 
-	skb_queue_head_init(&tnlq);
+	__skb_queue_head_init(&tnlq);
 	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
 			      INT_H_SIZE, BASIC_H_SIZE,
 			      dnode, onode, 0, 0, 0);
@@ -1708,9 +1708,9 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
 	if (!tnl)
 		return;
 
-	skb_queue_head_init(&tnlq);
-	skb_queue_head_init(&tmpxq);
-	skb_queue_head_init(&frags);
+	__skb_queue_head_init(&tnlq);
+	__skb_queue_head_init(&tmpxq);
+	__skb_queue_head_init(&frags);
 
 	/* At least one packet required for safe algorithm => add dummy */
 	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
@@ -1720,7 +1720,7 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
 		return;
 	}
-	skb_queue_tail(&tnlq, skb);
+	__skb_queue_tail(&tnlq, skb);
 	tipc_link_xmit(l, &tnlq, &tmpxq);
 	__skb_queue_purge(&tmpxq);
 
net/tipc/name_distr.c (+1 −1)
@@ -190,7 +190,7 @@ void tipc_named_node_up(struct net *net, u32 dnode)
 	struct name_table *nt = tipc_name_table(net);
 	struct sk_buff_head head;
 
-	skb_queue_head_init(&head);
+	__skb_queue_head_init(&head);
 
 	read_lock_bh(&nt->cluster_scope_lock);
 	named_distribute(net, &head, dnode, &nt->cluster_scope);
net/tipc/node.c (+4 −3)
@@ -1444,13 +1444,14 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
 
 	if (in_own_node(net, dnode)) {
 		tipc_loopback_trace(net, list);
+		spin_lock_init(&list->lock);
 		tipc_sk_rcv(net, list);
 		return 0;
 	}
 
 	n = tipc_node_find(net, dnode);
 	if (unlikely(!n)) {
-		skb_queue_purge(list);
+		__skb_queue_purge(list);
 		return -EHOSTUNREACH;
 	}
 
@@ -1459,7 +1460,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
 	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
 		tipc_node_read_unlock(n);
 		tipc_node_put(n);
-		skb_queue_purge(list);
+		__skb_queue_purge(list);
 		return -EHOSTUNREACH;
 	}
 
@@ -1491,7 +1492,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
 {
 	struct sk_buff_head head;
 
-	skb_queue_head_init(&head);
+	__skb_queue_head_init(&head);
 	__skb_queue_tail(&head, skb);
 	tipc_node_xmit(net, &head, dnode, selector);
 	return 0;