Commit 3f087668 authored by Simon Horman's avatar Simon Horman
Browse files
parents 51df1901 e5befbd9
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -363,6 +363,11 @@ This rule exists because users of the rfkill subsystem expect to get (and set,
when possible) the overall transmitter rfkill state, not of a particular rfkill
line.

5. During suspend, the rfkill class will attempt to soft-block the radio
through a call to rfkill->toggle_radio, and will try to restore the radio's
previous state during resume.  After an rfkill class has been suspended, it
will *not* call rfkill->toggle_radio again until it is resumed.

Example of a WLAN wireless driver connected to the rfkill subsystem:
--------------------------------------------------------------------

+1 −0
Original line number Diff line number Diff line
@@ -1571,6 +1571,7 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)

	return half_md4_transform(hash, keyptr->secret);
}
EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+1 −1
Original line number Diff line number Diff line
@@ -35,8 +35,8 @@
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#include <linux/if_vlan.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
+0 −67
Original line number Diff line number Diff line
@@ -64,68 +64,6 @@ struct pcpu_lstats {
	unsigned long bytes;
};

/* KISS: just allocate small chunks and copy bits.
 *
 * So, in fact, this is documentation, explaining what we expect
 * of largesending device modulo TCP checksum, which is ignored for loopback.
 */

#ifdef LOOPBACK_TSO
/* Software TSO for loopback: split one oversized TCP-over-IPv4 skb into
 * MSS-sized segments, replicating the IP/TCP headers into each segment,
 * and feed each segment to netif_rx().  The original skb is consumed.
 * NOTE(review): assumes skb is TCP/IPv4 with headers in the linear area
 * and valid gso_size — callers appear to guarantee this; confirm.
 */
static void emulate_large_send_offload(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	/* TCP header sits right after the IP header (ihl is in 32-bit words) */
	struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) +
					      (iph->ihl * 4));
	/* doffset = total IP + TCP header length, copied into every segment */
	unsigned int doffset = (iph->ihl + th->doff) * 4;
	/* per-segment size: one MSS of payload plus the headers */
	unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
	unsigned int offset = 0;	/* payload bytes consumed so far */
	u32 seq = ntohl(th->seq);	/* running TCP sequence number */
	u16 id  = ntohs(iph->id);	/* running IP identification */

	while (offset + doffset < skb->len) {
		/* payload for this segment: at most one MSS, less at the tail */
		unsigned int frag_size = min(mtu, skb->len - offset) - doffset;
		/* +32 gives headroom for the (negative-offset) MAC header */
		struct sk_buff *nskb = alloc_skb(mtu + 32, GFP_ATOMIC);

		if (!nskb)
			break;	/* out of memory: drop the remaining segments */
		skb_reserve(nskb, 32);
		skb_set_mac_header(nskb, -ETH_HLEN);
		skb_reset_network_header(nskb);
		iph = ip_hdr(nskb);
		/* copy the original IP+TCP headers verbatim ... */
		skb_copy_to_linear_data(nskb, skb_network_header(skb),
					doffset);
		/* ... then this segment's slice of the payload */
		if (skb_copy_bits(skb,
				  doffset + offset,
				  nskb->data + doffset,
				  frag_size))
			BUG();
		skb_put(nskb, doffset + frag_size);
		/* loopback: checksum is never verified */
		nskb->ip_summed = CHECKSUM_UNNECESSARY;
		nskb->dev = skb->dev;
		nskb->priority = skb->priority;
		nskb->protocol = skb->protocol;
		nskb->dst = dst_clone(skb->dst);
		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
		nskb->pkt_type = skb->pkt_type;

		/* patch the copied headers for this segment */
		th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4);
		iph->tot_len = htons(frag_size + doffset);
		iph->id = htons(id);
		iph->check = 0;
		iph->check = ip_fast_csum((unsigned char *) iph, iph->ihl);
		th->seq = htonl(seq);
		/* FIN/PSH belong only on the final segment */
		if (offset + doffset + frag_size < skb->len)
			th->fin = th->psh = 0;
		netif_rx(nskb);
		offset += frag_size;
		seq += frag_size;
		id++;
	}

	/* the original oversized skb is consumed either way */
	dev_kfree_skb(skb);
}
#endif /* LOOPBACK_TSO */

/*
 * The higher levels take care of making this non-reentrant (it's
 * called with bh's disabled).
@@ -137,9 +75,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
	skb_orphan(skb);

	skb->protocol = eth_type_trans(skb,dev);
#ifndef LOOPBACK_MUST_CHECKSUM
	skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif

#ifdef LOOPBACK_TSO
	if (skb_is_gso(skb)) {
@@ -234,9 +169,7 @@ static void loopback_setup(struct net_device *dev)
	dev->type		= ARPHRD_LOOPBACK;	/* 0x0001*/
	dev->flags		= IFF_LOOPBACK;
	dev->features 		= NETIF_F_SG | NETIF_F_FRAGLIST
#ifdef LOOPBACK_TSO
		| NETIF_F_TSO
#endif
		| NETIF_F_NO_CSUM
		| NETIF_F_HIGHDMA
		| NETIF_F_LLTX
+101 −4
Original line number Diff line number Diff line
@@ -358,6 +358,66 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
	return mask;
}

/* Allocate an skb for a packet arriving from user space.
 *
 * prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers).
 *
 * First tries a single linear allocation; if that fails (e.g. a huge GSO
 * packet under memory pressure), falls back to a small linear part of
 * 'linear' bytes plus zeroed page fragments for the remainder.
 * Returns NULL on failure; on success skb->len == len exactly.
 */
static struct sk_buff *tun_alloc_skb(size_t prepad, size_t len, size_t linear,
				     gfp_t gfp)
{
	struct sk_buff *skb;
	unsigned int i;

	/* Fast path: everything in one linear buffer.  __GFP_NOWARN because
	 * failure here is expected and handled by the paged fallback. */
	skb = alloc_skb(prepad + len, gfp|__GFP_NOWARN);
	if (skb) {
		skb_reserve(skb, prepad);
		skb_put(skb, len);
		return skb;
	}

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE)
		return NULL;

	/* Start with a normal skb holding the linear (header) part,
	 * and add pages for the payload. */
	skb = alloc_skb(prepad + linear, gfp);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);

	len -= linear;

	for (i = 0; len && i < MAX_SKB_FRAGS; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		/* Account only the bytes actually needed: the old code
		 * always charged a full PAGE_SIZE per frag (and allocated
		 * one extra page when len was an exact page multiple), so
		 * skb->len could exceed the requested length by up to a
		 * page, padding the packet with zeros. */
		size_t chunk = len < PAGE_SIZE ? len : PAGE_SIZE;

		f->page = alloc_page(gfp|__GFP_ZERO);
		if (!f->page)
			break;

		f->page_offset = 0;
		f->size = chunk;

		skb->data_len += chunk;
		skb->len += chunk;
		skb->truesize += PAGE_SIZE;	/* a whole page is pinned */
		skb_shinfo(skb)->nr_frags++;

		len -= chunk;
	}

	/* Too large (ran out of frag slots), or alloc fail? */
	if (unlikely(len)) {
		kfree_skb(skb);
		skb = NULL;
	}

	return skb;
}

/* Get packet from user space buffer */
static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv, size_t count)
{
@@ -391,14 +451,12 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
			return -EINVAL;
	}

	if (!(skb = alloc_skb(len + align, GFP_KERNEL))) {
	if (!(skb = tun_alloc_skb(align, len, gso.hdr_len, GFP_KERNEL))) {
		tun->dev->stats.rx_dropped++;
		return -ENOMEM;
	}

	if (align)
		skb_reserve(skb, align);
	if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
	if (skb_copy_datagram_from_iovec(skb, 0, iv, len)) {
		tun->dev->stats.rx_dropped++;
		kfree_skb(skb);
		return -EFAULT;
@@ -748,6 +806,36 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
	return err;
}

/* TUNGETIFF: report the device name and the effective flags of the
 * attached tun device back to user space.
 * Returns 0 on success, -EBADFD if the fd is not attached to a device.
 */
static int tun_get_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun = file->private_data;

	if (!tun)
		return -EBADFD;

	DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name);

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = 0;

	/* BUG FIX: the device type must be read from tun->flags.  The old
	 * code tested ifr->ifr_flags, which was just zeroed above, so the
	 * ioctl always reported IFF_TAP even for tun devices. */
	if (tun->flags & TUN_TUN_DEV)
		ifr->ifr_flags |= IFF_TUN;
	else
		ifr->ifr_flags |= IFF_TAP;

	if (tun->flags & TUN_NO_PI)
		ifr->ifr_flags |= IFF_NO_PI;

	/* This flag has no real meaning after attach, but userspace may
	 * still want to know whether it was requested. */
	if (tun->flags & TUN_ONE_QUEUE)
		ifr->ifr_flags |= IFF_ONE_QUEUE;

	if (tun->flags & TUN_VNET_HDR)
		ifr->ifr_flags |= IFF_VNET_HDR;

	return 0;
}

/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct net_device *dev, unsigned long arg)
@@ -833,6 +921,15 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
	DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd);

	switch (cmd) {
	case TUNGETIFF:
		ret = tun_get_iff(current->nsproxy->net_ns, file, &ifr);
		if (ret)
			return ret;

		if (copy_to_user(argp, &ifr, sizeof(ifr)))
			return -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */
		if (arg)
Loading