Commit 519be699 authored by Linus Torvalds
Browse files
Pull networking fixes from David Miller:

 1) Off by one in netlink parsing of mac802154_hwsim, from Alexander
    Aring.

 2) nf_tables RCU usage fix from Taehee Yoo.

 3) Flow dissector needs nhoff and thoff clamping, from Stanislav
    Fomichev.

 4) Missing sin6_flowinfo initialization in SCTP, from Xin Long.

 5) Spectrev1 in ipmr and ip6mr, from Gustavo A. R. Silva.

 6) Fix r8169 crash when DEBUG_SHIRQ is enabled, from Heiner Kallweit.

 7) Fix SKB leak in rtlwifi, from Larry Finger.

 8) Fix state pruning in bpf verifier, from Jakub Kicinski.

 9) Don't handle completely duplicate fragments as overlapping, from
    Michal Kubecek.

10) Fix memory corruption with macb and 64-bit DMA, from Anssi Hannula.

11) Fix TCP fallback socket release in smc, from Myungho Jung.

12) gro_cells_destroy needs to napi_disable, from Lorenzo Bianconi.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (130 commits)
  rds: Fix warning.
  neighbor: NTF_PROXY is a valid ndm_flag for a dump request
  net: mvpp2: fix the phylink mode validation
  net/sched: cls_flower: Remove old entries from rhashtable
  net/tls: allocate tls context using GFP_ATOMIC
  iptunnel: make TUNNEL_FLAGS available in uapi
  gro_cell: add napi_disable in gro_cells_destroy
  lan743x: Remove MAC Reset from initialization
  net/mlx5e: Remove the false indication of software timestamping support
  net/mlx5: Typo fix in del_sw_hw_rule
  net/mlx5e: RX, Fix wrong early return in receive queue poll
  ipv6: explicitly initialize udp6_addr in udp_sock_create6()
  bnxt_en: Fix ethtool self-test loopback.
  net/rds: remove user triggered WARN_ON in rds_sendmsg
  net/rds: fix warn in rds_message_alloc_sgs
  ath10k: skip sending quiet mode cmd for WCN3990
  mac80211: free skb fraglist before freeing the skb
  nl80211: fix memory leak if validate_pae_over_nl80211() fails
  net/smc: fix TCP fallback socket release
  vxge: ensure data0 is initialized in when fetching firmware version information
  ...
parents ab63e725 d84e7bc0
Loading
Loading
Loading
Loading
+9 −2
Original line number Diff line number Diff line
@@ -8943,7 +8943,7 @@ F: arch/mips/boot/dts/img/pistachio_marduk.dts

MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
M:	Andrew Lunn <andrew@lunn.ch>
M:	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
M:	Vivien Didelot <vivien.didelot@gmail.com>
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/dsa/mv88e6xxx/
@@ -9448,6 +9448,13 @@ F: drivers/media/platform/mtk-vpu/
F:	Documentation/devicetree/bindings/media/mediatek-vcodec.txt
F:	Documentation/devicetree/bindings/media/mediatek-vpu.txt

MEDIATEK MT76 WIRELESS LAN DRIVER
M:	Felix Fietkau <nbd@nbd.name>
M:	Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
L:	linux-wireless@vger.kernel.org
S:	Maintained
F:	drivers/net/wireless/mediatek/mt76/

MEDIATEK MT7601U WIRELESS LAN DRIVER
M:	Jakub Kicinski <kubakici@wp.pl>
L:	linux-wireless@vger.kernel.org
@@ -10418,7 +10425,7 @@ F: drivers/net/wireless/

NETWORKING [DSA]
M:	Andrew Lunn <andrew@lunn.ch>
M:	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
M:	Vivien Didelot <vivien.didelot@gmail.com>
M:	Florian Fainelli <f.fainelli@gmail.com>
S:	Maintained
F:	Documentation/devicetree/bindings/net/dsa/
+7 −0
Original line number Diff line number Diff line
@@ -153,6 +153,11 @@ struct chtls_dev {
	unsigned int cdev_state;
};

struct chtls_listen {
	struct chtls_dev *cdev;
	struct sock *sk;
};

struct chtls_hws {
	struct sk_buff_head sk_recv_queue;
	u8 txqid;
@@ -215,6 +220,8 @@ struct chtls_sock {
	u16 resv2;
	u32 delack_mode;
	u32 delack_seq;
	u32 snd_win;
	u32 rcv_win;

	void *passive_reap_next;        /* placeholder for passive */
	struct chtls_hws tlshws;
+52 −26
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>

@@ -887,24 +888,6 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
	return mtu_idx;
}

static unsigned int select_rcv_wnd(struct chtls_sock *csk)
{
	unsigned int rcvwnd;
	unsigned int wnd;
	struct sock *sk;

	sk = csk->sk;
	wnd = tcp_full_space(sk);

	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	rcvwnd = MAX_RCV_WND;

	csk_set_flag(csk, CSK_UPDATE_RCV_WND);
	return min(wnd, rcvwnd);
}

static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
{
	int wscale = 0;
@@ -951,7 +934,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
	csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
					req);
	opt0 = TCAM_BYPASS_F |
	       WND_SCALE_V((tp)->rx_opt.rcv_wscale) |
	       WND_SCALE_V(RCV_WSCALE(tp)) |
	       MSS_IDX_V(csk->mtu_idx) |
	       L2T_IDX_V(csk->l2t_entry->idx) |
	       NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
@@ -1005,6 +988,25 @@ static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
	return 0;
}

static void chtls_set_tcp_window(struct chtls_sock *csk)
{
	struct net_device *ndev = csk->egress_dev;
	struct port_info *pi = netdev_priv(ndev);
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;
#define CHTLS_10G_RCVWIN (256 * 1024)
	csk->rcv_win = CHTLS_10G_RCVWIN;
	if (scale)
		csk->rcv_win *= scale;
#define CHTLS_10G_SNDWIN (256 * 1024)
	csk->snd_win = CHTLS_10G_SNDWIN;
	if (scale)
		csk->snd_win *= scale;
}

static struct sock *chtls_recv_sock(struct sock *lsk,
				    struct request_sock *oreq,
				    void *network_hdr,
@@ -1067,6 +1069,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
	csk->port_id = port_id;
	csk->egress_dev = ndev;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	chtls_set_tcp_window(csk);
	tp->rcv_wnd = csk->rcv_win;
	csk->sndbuf = csk->snd_win;
	csk->ulp_mode = ULP_MODE_TLS;
	step = cdev->lldi->nrxq / cdev->lldi->nchan;
	csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
@@ -1076,9 +1081,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
	csk->sndbuf = newsk->sk_sndbuf;
	csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type,
					 cxgb4_port_viid(ndev));
	tp->rcv_wnd = select_rcv_wnd(csk);
	RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
					   WSCALE_OK(tp),
					   sock_net(newsk)->
						ipv4.sysctl_tcp_window_scaling,
					   tp->window_clamp);
	neigh_release(n);
	inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1130,6 +1135,7 @@ static void chtls_pass_accept_request(struct sock *sk,
	struct cpl_t5_pass_accept_rpl *rpl;
	struct cpl_pass_accept_req *req;
	struct listen_ctx *listen_ctx;
	struct vlan_ethhdr *vlan_eh;
	struct request_sock *oreq;
	struct sk_buff *reply_skb;
	struct chtls_sock *csk;
@@ -1142,6 +1148,10 @@ static void chtls_pass_accept_request(struct sock *sk,
	unsigned int stid;
	unsigned int len;
	unsigned int tid;
	bool th_ecn, ect;
	__u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
	u16 eth_hdr_len;
	bool ecn_ok;

	req = cplhdr(skb) + RSS_HDR;
	tid = GET_TID(req);
@@ -1180,24 +1190,40 @@ static void chtls_pass_accept_request(struct sock *sk,
	oreq->mss = 0;
	oreq->ts_recent = 0;

	eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
		network_hdr = (void *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		network_hdr = (void *)(vlan_eh + 1);
	}
	if (iph->version != 0x4)
		goto free_oreq;

	network_hdr = (void *)(eh + 1);
	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)req);

	tcp_rsk(oreq)->tfo_listener = false;
	tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
	chtls_set_req_port(oreq, tcph->source, tcph->dest);
	inet_rsk(oreq)->ecn_ok = 0;
	chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
	if (req->tcpopt.wsf <= 14) {
	ip_dsfield = ipv4_get_dsfield(iph);
	if (req->tcpopt.wsf <= 14 &&
	    sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
		inet_rsk(oreq)->wscale_ok = 1;
		inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
	}
	inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
	th_ecn = tcph->ece && tcph->cwr;
	if (th_ecn) {
		ect = !INET_ECN_is_not_ect(ip_dsfield);
		ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
		if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
			inet_rsk(oreq)->ecn_ok = 1;
	}

	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
	if (!newsk)
+8 −12
Original line number Diff line number Diff line
@@ -397,7 +397,7 @@ static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,

	req_wr->lsodisable_to_flags =
			htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
			      FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
			      TX_URG_V(skb_urgent(skb)) |
			      T6_TX_FORCE_F | wr_ulp_mode_force |
			      TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
					 skb_queue_empty(&csk->txq)));
@@ -534,9 +534,8 @@ static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
				FW_OFLD_TX_DATA_WR_SHOVE_F);

	req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
			FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
			FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag
					(sk, CSK_TX_MORE_DATA)) &&
			TX_URG_V(skb_urgent(skb)) |
			TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
				   skb_queue_empty(&csk->txq)));
	req->plen = htonl(len);
}
@@ -995,7 +994,6 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
	int mss, flags, err;
	int recordsz = 0;
	int copied = 0;
	int hdrlen = 0;
	long timeo;

	lock_sock(sk);
@@ -1032,7 +1030,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)

			recordsz = tls_header_read(&hdr, &msg->msg_iter);
			size -= TLS_HEADER_LENGTH;
			hdrlen += TLS_HEADER_LENGTH;
			copied += TLS_HEADER_LENGTH;
			csk->tlshws.txleft = recordsz;
			csk->tlshws.type = hdr.type;
			if (skb)
@@ -1083,9 +1081,7 @@ new_buf:
			int off = TCP_OFF(sk);
			bool merge;

			if (!page)
				goto wait_for_memory;

			if (page)
				pg_size <<= compound_order(page);
			if (off < pg_size &&
			    skb_can_coalesce(skb, i, page, off)) {
@@ -1187,7 +1183,7 @@ out:
		chtls_tcp_push(sk, flags);
done:
	release_sock(sk);
	return copied + hdrlen;
	return copied;
do_fault:
	if (!skb->len) {
		__skb_unlink(skb, &csk->txq);
+63 −42
Original line number Diff line number Diff line
@@ -55,24 +55,19 @@ static void unregister_listen_notifier(struct notifier_block *nb)
static int listen_notify_handler(struct notifier_block *this,
				 unsigned long event, void *data)
{
	struct chtls_dev *cdev;
	struct sock *sk;
	int ret;
	struct chtls_listen *clisten;
	int ret = NOTIFY_DONE;

	sk = data;
	ret =  NOTIFY_DONE;
	clisten = (struct chtls_listen *)data;

	switch (event) {
	case CHTLS_LISTEN_START:
		ret = chtls_listen_start(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	case CHTLS_LISTEN_STOP:
		mutex_lock(&cdev_list_lock);
		list_for_each_entry(cdev, &cdev_list, list) {
			if (event == CHTLS_LISTEN_START)
				ret = chtls_listen_start(cdev, sk);
			else
				chtls_listen_stop(cdev, sk);
		}
		mutex_unlock(&cdev_list_lock);
		chtls_listen_stop(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	}
	return ret;
@@ -90,8 +85,9 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
	return 0;
}

static int chtls_start_listen(struct sock *sk)
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;
	int err;

	if (sk->sk_protocol != IPPROTO_TCP)
@@ -102,21 +98,33 @@ static int chtls_start_listen(struct sock *sk)
		return -EADDRNOTAVAIL;

	sk->sk_backlog_rcv = listen_backlog_rcv;
	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return -ENOMEM;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	err = raw_notifier_call_chain(&listen_notify_list,
				      CHTLS_LISTEN_START, sk);
				      CHTLS_LISTEN_START, clisten);
	mutex_unlock(&notify_mutex);
	return err;
}

static void chtls_stop_listen(struct sock *sk)
static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return;

	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_STOP, sk);
				CHTLS_LISTEN_STOP, clisten);
	mutex_unlock(&notify_mutex);
}

@@ -138,15 +146,43 @@ static int chtls_inline_feature(struct tls_device *dev)

static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		return chtls_start_listen(sk);
		return chtls_start_listen(cdev, sk);
	return 0;
}

static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		chtls_stop_listen(sk);
		chtls_stop_listen(cdev, sk);
}

static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	tls_unregister_device(&cdev->tlsdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	kfree_skb(cdev->askb);
	kfree(cdev);
}

static inline void chtls_dev_release(struct kref *kref)
{
	struct chtls_dev *cdev;
	struct tls_device *dev;

	dev = container_of(kref, struct tls_device, kref);
	cdev = to_chtls_dev(dev);
	chtls_free_uld(cdev);
}

static void chtls_register_dev(struct chtls_dev *cdev)
@@ -159,15 +195,12 @@ static void chtls_register_dev(struct chtls_dev *cdev)
	tlsdev->feature = chtls_inline_feature;
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tls_register_device(&cdev->tlsdev);
	tlsdev->release = chtls_dev_release;
	kref_init(&tlsdev->kref);
	tls_register_device(tlsdev);
	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}

static void chtls_unregister_dev(struct chtls_dev *cdev)
{
	tls_unregister_device(&cdev->tlsdev);
}

static void process_deferq(struct work_struct *task_param)
{
	struct chtls_dev *cdev = container_of(task_param,
@@ -262,28 +295,16 @@ out:
	return NULL;
}

static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	chtls_unregister_dev(cdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	kfree_skb(cdev->askb);
	kfree(cdev);
}

static void chtls_free_all_uld(void)
{
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
			chtls_free_uld(cdev);
		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
			list_del(&cdev->list);
			kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		}
	}
	mutex_unlock(&cdev_mutex);
}
@@ -304,7 +325,7 @@ static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
		mutex_lock(&cdev_mutex);
		list_del(&cdev->list);
		mutex_unlock(&cdev_mutex);
		chtls_free_uld(cdev);
		kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		break;
	default:
		break;
Loading