Commit e523a256 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull networking fixes from David Miller:

 1) Various sockmap fixes from John Fastabend (pinned map handling,
    blocking in recvmsg, double page put, error handling during redirect
    failures, etc.)

 2) Fix dead code handling in x86-64 JIT, from Gianluca Borello.

 3) Missing device put in RDS IB code, from Dag Moxnes.

 4) Don't process fast open during repair mode in TCP, from Yuchung
    Cheng.

 5) Move address/port comparison fixes in SCTP, from Xin Long.

 6) Handle adding a bond slave's master into a bridge properly, from
    Hangbin Liu.

 7) IPv6 multipath code can operate on uninitialized memory due to an
    assumption that the icmp header is in the linear SKB area. Fix from
    Eric Dumazet.

 8) Don't invoke do_tcp_sendpages() recursively via TLS, from Dave
    Watson.

9) Fix memory leaks in x86-64 JIT, from Daniel Borkmann.

10) RDS leaks kernel memory to userspace, from Eric Dumazet.

11) DCCP can invoke a tasklet on a freed socket, take a refcount. Also
    from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (78 commits)
  dccp: fix tasklet usage
  smc: fix sendpage() call
  net/smc: handle unregistered buffers
  net/smc: call consolidation
  qed: fix spelling mistake: "offloded" -> "offloaded"
  net/mlx5e: fix spelling mistake: "loobpack" -> "loopback"
  tcp: restore autocorking
  rds: do not leak kernel memory to user land
  qmi_wwan: do not steal interfaces from class drivers
  ipv4: fix fnhe usage by non-cached routes
  bpf: sockmap, fix error handling in redirect failures
  bpf: sockmap, zero sg_size on error when buffer is released
  bpf: sockmap, fix scatterlist update on error path in send with apply
  net_sched: fq: take care of throttled flows before reuse
  ipv6: Revert "ipv6: Allow non-gateway ECMP for IPv6"
  bpf, x64: fix memleak when not converging on calls
  bpf, x64: fix memleak when not converging after image
  net/smc: restrict non-blocking connect finish
  8139too: Use disable_irq_nosync() in rtl8139_poll_controller()
  sctp: fix the issue that the cookie-ack with auth can't get processed
  ...
parents bb609316 a8d7aa17
Loading
Loading
Loading
Loading
+9 −1
Original line number Diff line number Diff line
@@ -557,6 +557,14 @@ A: Although LLVM IR generation and optimization try to stay architecture
       pulls in some header files containing file scope host assembly codes.
     - You can add "-fno-jump-tables" to work around the switch table issue.

   Otherwise, you can use bpf target.
   Otherwise, you can use bpf target. Additionally, you _must_ use bpf target
   when:

     - Your program uses data structures with pointer or long / unsigned long
       types that interface with BPF helpers or context data structures. Access
       into these structures is verified by the BPF verifier and may result
       in verification failures if the native architecture is not aligned with
       the BPF architecture, e.g. 64-bit. An example of this is
       BPF_PROG_TYPE_SK_MSG require '-target bpf'

Happy BPF hacking!
+2 −0
Original line number Diff line number Diff line
@@ -9725,6 +9725,7 @@ W: https://fedorahosted.org/dropwatch/
F:	net/core/drop_monitor.c

NETWORKING DRIVERS
M:	"David S. Miller" <davem@davemloft.net>
L:	netdev@vger.kernel.org
W:	http://www.linuxfoundation.org/en/Net
Q:	http://patchwork.ozlabs.org/project/netdev/list/
@@ -12498,6 +12499,7 @@ F: drivers/scsi/st_*.h
SCTP PROTOCOL
M:	Vlad Yasevich <vyasevich@gmail.com>
M:	Neil Horman <nhorman@tuxdriver.com>
M:	Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
L:	linux-sctp@vger.kernel.org
W:	http://lksctp.sourceforge.net
S:	Maintained
+14 −4
Original line number Diff line number Diff line
@@ -1027,7 +1027,17 @@ emit_cond_jmp: /* convert BPF opcode to x86 */
			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
@@ -1226,6 +1236,7 @@ skip_init_addrs:
	for (pass = 0; pass < 20 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
@@ -1236,8 +1247,7 @@ skip_init_addrs:
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				prog = orig_prog;
				goto out_addrs;
				goto out_image;
			}
			break;
		}
@@ -1273,7 +1283,7 @@ skip_init_addrs:
		prog = orig_prog;
	}

	if (!prog->is_func || extra_pass) {
	if (!image || !prog->is_func || extra_pass) {
out_addrs:
		kfree(addrs);
		kfree(jit_data);
+1 −1
Original line number Diff line number Diff line
@@ -4757,7 +4757,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	return mlx5_get_vector_affinity(dev->mdev, comp_vector);
	return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
}

/* The mlx5_ib_multiport_mutex should be held when calling this function */
+13 −5
Original line number Diff line number Diff line
@@ -2144,14 +2144,21 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_select_queue	= bcm_sysport_select_queue,
};

static int bcm_sysport_map_queues(struct net_device *dev,
static int bcm_sysport_map_queues(struct notifier_block *nb,
				  struct dsa_notifier_register_info *info)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_priv *priv;
	struct net_device *slave_dev;
	unsigned int num_tx_queues;
	unsigned int q, start, port;
	struct net_device *dev;

	priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
	if (priv->netdev != info->master)
		return 0;

	dev = info->master;

	/* We can't be setting up queue inspection for non directly attached
	 * switches
@@ -2174,11 +2181,12 @@ static int bcm_sysport_map_queues(struct net_device *dev,
	if (priv->is_lite)
		netif_set_real_num_tx_queues(slave_dev,
					     slave_dev->num_tx_queues / 2);

	num_tx_queues = slave_dev->real_num_tx_queues;

	if (priv->per_port_num_tx_queues &&
	    priv->per_port_num_tx_queues != num_tx_queues)
		netdev_warn(slave_dev, "asymetric number of per-port queues\n");
		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

	priv->per_port_num_tx_queues = num_tx_queues;

@@ -2201,7 +2209,7 @@ static int bcm_sysport_map_queues(struct net_device *dev,
	return 0;
}

static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct dsa_notifier_register_info *info;
@@ -2211,7 +2219,7 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused,

	info = ptr;

	return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
	return notifier_from_errno(bcm_sysport_map_queues(nb, info));
}

#define REV_FMT	"v%2x.%02x"
Loading