Commit f3c7a6e0 authored by David S. Miller
Browse files

Merge branch 'mptcp-cope-with-syncookie-on-MP_JOINs'



Paolo Abeni says:

====================
mptcp: cope with syncookie on MP_JOINs

Currently syncookies on MP_JOIN connections are not handled correctly: the
connections fallback to TCP and are kept alive instead of resetting them at
fallback time.

The first patch propagates the required information up to syn_recv_sock time,
and the second patch unifies the error path for all MP_JOIN
requests.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5eea3a63 9e365ff5
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -249,6 +249,7 @@ struct mptcp_subflow_request_sock {
	u64	thmac;
	u32	local_nonce;
	u32	remote_nonce;
	struct mptcp_sock	*msk;
};

static inline struct mptcp_subflow_request_sock *
+27 −30
Original line number Diff line number Diff line
@@ -69,6 +69,9 @@ static void subflow_req_destructor(struct request_sock *req)

	pr_debug("subflow_req=%p", subflow_req);

	if (subflow_req->msk)
		sock_put((struct sock *)subflow_req->msk);

	if (subflow_req->mp_capable)
		mptcp_token_destroy_request(subflow_req->token);
	tcp_request_sock_ops.destructor(req);
@@ -86,7 +89,7 @@ static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
}

/* validate received token and create truncated hmac and nonce for SYN-ACK */
static bool subflow_token_join_request(struct request_sock *req,
static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
						     const struct sk_buff *skb)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
@@ -97,13 +100,13 @@ static bool subflow_token_join_request(struct request_sock *req,
	msk = mptcp_token_get_sock(subflow_req->token);
	if (!msk) {
		SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
		return false;
		return NULL;
	}

	local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
	if (local_id < 0) {
		sock_put((struct sock *)msk);
		return false;
		return NULL;
	}
	subflow_req->local_id = local_id;

@@ -114,9 +117,7 @@ static bool subflow_token_join_request(struct request_sock *req,
			      subflow_req->remote_nonce, hmac);

	subflow_req->thmac = get_unaligned_be64(hmac);

	sock_put((struct sock *)msk);
	return true;
	return msk;
}

static void subflow_init_req(struct request_sock *req,
@@ -133,6 +134,7 @@ static void subflow_init_req(struct request_sock *req,

	subflow_req->mp_capable = 0;
	subflow_req->mp_join = 0;
	subflow_req->msk = NULL;

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
@@ -166,12 +168,9 @@ static void subflow_init_req(struct request_sock *req,
		subflow_req->remote_id = mp_opt.join_id;
		subflow_req->token = mp_opt.token;
		subflow_req->remote_nonce = mp_opt.nonce;
		pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
			 subflow_req->remote_nonce);
		if (!subflow_token_join_request(req, skb)) {
			subflow_req->mp_join = 0;
			// @@ need to trigger RST
		}
		subflow_req->msk = subflow_token_join_request(req, skb);
		pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
			 subflow_req->remote_nonce, subflow_req->msk);
	}
}

@@ -354,10 +353,9 @@ static bool subflow_hmac_valid(const struct request_sock *req,
	const struct mptcp_subflow_request_sock *subflow_req;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct mptcp_sock *msk;
	bool ret;

	subflow_req = mptcp_subflow_rsk(req);
	msk = mptcp_token_get_sock(subflow_req->token);
	msk = subflow_req->msk;
	if (!msk)
		return false;

@@ -365,12 +363,7 @@ static bool subflow_hmac_valid(const struct request_sock *req,
			      subflow_req->remote_nonce,
			      subflow_req->local_nonce, hmac);

	ret = true;
	if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN))
		ret = false;

	sock_put((struct sock *)msk);
	return ret;
	return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}

static void mptcp_sock_destruct(struct sock *sk)
@@ -438,22 +431,25 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
	struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
	struct mptcp_subflow_request_sock *subflow_req;
	struct mptcp_options_received mp_opt;
	bool fallback_is_fatal = false;
	bool fallback, fallback_is_fatal;
	struct sock *new_msk = NULL;
	bool fallback = false;
	struct sock *child;

	pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);

	/* we need later a valid 'mp_capable' value even when options are not
	 * parsed
	/* After child creation we must look for 'mp_capable' even when options
	 * are not parsed
	 */
	mp_opt.mp_capable = 0;
	if (tcp_rsk(req)->is_mptcp == 0)

	/* hopefully temporary handling for MP_JOIN+syncookie */
	subflow_req = mptcp_subflow_rsk(req);
	fallback_is_fatal = subflow_req->mp_join;
	fallback = !tcp_rsk(req)->is_mptcp;
	if (fallback)
		goto create_child;

	/* if the sk is MP_CAPABLE, we try to fetch the client key */
	subflow_req = mptcp_subflow_rsk(req);
	if (subflow_req->mp_capable) {
		if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
			/* here we can receive and accept an in-window,
@@ -474,12 +470,11 @@ create_msk:
		if (!new_msk)
			fallback = true;
	} else if (subflow_req->mp_join) {
		fallback_is_fatal = true;
		mptcp_get_options(skb, &mp_opt);
		if (!mp_opt.mp_join ||
		    !subflow_hmac_valid(req, &mp_opt)) {
			SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
			return NULL;
			fallback = true;
		}
	}

@@ -522,10 +517,12 @@ create_child:
		} else if (ctx->mp_join) {
			struct mptcp_sock *owner;

			owner = mptcp_token_get_sock(ctx->token);
			owner = subflow_req->msk;
			if (!owner)
				goto dispose_child;

			/* move the msk reference ownership to the subflow */
			subflow_req->msk = NULL;
			ctx->conn = (struct sock *)owner;
			if (!mptcp_finish_join(child))
				goto dispose_child;