Commit 33c34c5e authored by alex.bluesman.smirnov@gmail.com, committed by David S. Miller

6lowpan: rework fragment-deleting routine



The 6lowpan module starts collecting incoming frames and fragments
right after lowpan_module_init(); it is therefore better to clean up
unfinished fragments in lowpan_cleanup_module() instead of doing it
when the link goes down.
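
[Editor's note: a rough sketch of the ordering this relies on, using a
hypothetical module with made-up names (my_packet_type, my_rcv, my_init,
my_exit), not the 6lowpan code itself. The packet handler is registered in
module init, so frames can arrive whether or not any link is up, and module
exit must unregister that handler before freeing whatever state it
accumulated.]

#include <linux/if_ether.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical receive hook: it starts filling module-wide state as
 * soon as the packet_type below is registered, i.e. from module init.
 */
static int my_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	/* reassembly state would be collected here */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type my_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IEEE802154),
	.func = my_rcv,
};

static int __init my_init(void)
{
	/* frames may arrive from this point on, link up or not */
	dev_add_pack(&my_packet_type);
	return 0;
}

static void __exit my_exit(void)
{
	/* unregister first so no new frames race with the cleanup ... */
	dev_remove_pack(&my_packet_type);
	/* ... then it is safe to free anything my_rcv() accumulated */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");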

Changed the spinlock calls to their _bh variants to prevent a deadlock
with the expired-timer event, and removed the unused per-fragment lock.
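
[Editor's note: a minimal illustration of the deadlock the _bh variants and
del_timer_sync() avoid, again with hypothetical names (my_lock, my_list,
struct my_entry, my_timer_cb); this sketches the general rule, not the
patched code. The timer callback runs in softirq context, so process-context
code that shares a lock with timer-managed data should take it with
spin_lock_bh(), and del_timer_sync() may only be called under that lock if
the callback itself never takes it.]

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct my_entry {
	struct list_head	list;
	struct timer_list	timer;
};

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_list);

/* Runs in softirq context when the timer fires.  It must not take
 * my_lock: the teardown below holds that lock across del_timer_sync(),
 * which waits for a running callback to finish.
 */
static void my_timer_cb(unsigned long data)
{
	struct my_entry *e = (struct my_entry *)data;

	pr_debug("entry %p timed out\n", e);
}

static void my_add_entry(void)
{
	struct my_entry *e = kzalloc(sizeof(*e), GFP_ATOMIC);

	if (!e)
		return;

	setup_timer(&e->timer, my_timer_cb, (unsigned long)e);
	mod_timer(&e->timer, jiffies + HZ);

	spin_lock_bh(&my_lock);
	list_add_tail(&e->list, &my_list);
	spin_unlock_bh(&my_lock);
}

static void my_teardown(void)
{
	struct my_entry *e, *tmp;

	/* Take the lock with BH disabled: softirq code (such as a timer
	 * callback) contending for my_lock could otherwise interrupt
	 * this section on the same CPU and deadlock.
	 */
	spin_lock_bh(&my_lock);
	list_for_each_entry_safe(e, tmp, &my_list, list) {
		/* safe under my_lock only because my_timer_cb() never takes it */
		del_timer_sync(&e->timer);
		list_del(&e->list);
		kfree(e);
	}
	spin_unlock_bh(&my_lock);
}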

Signed-off-by: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent abbee2ef
+20 −19
@@ -113,7 +113,6 @@ struct lowpan_dev_record {
 
 struct lowpan_fragment {
 	struct sk_buff		*skb;		/* skb to be assembled */
-	spinlock_t		lock;		/* concurency lock */
 	u16			length;		/* length to be assemled */
 	u32			bytes_rcv;	/* bytes received */
 	u16			tag;		/* current fragment tag */
@@ -637,10 +636,7 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
 
 	pr_debug("timer expired for frame with tag %d\n", entry->tag);
 
-	spin_lock(&flist_lock);
 	list_del(&entry->list);
-	spin_unlock(&flist_lock);
-
 	dev_kfree_skb(entry->skb);
 	kfree(entry);
 }
@@ -727,7 +723,7 @@ lowpan_process_data(struct sk_buff *skb)
 		 * check if frame assembling with the same tag is
 		 * already in progress
 		 */
-		spin_lock(&flist_lock);
+		spin_lock_bh(&flist_lock);
 
 		list_for_each_entry(frame, &lowpan_fragments, list)
 			if (frame->tag == tag) {
@@ -761,9 +757,9 @@ lowpan_process_data(struct sk_buff *skb)
 		if ((frame->bytes_rcv == frame->length) &&
 		     frame->timer.expires > jiffies) {
 			/* if timer haven't expired - first of all delete it */
-			del_timer(&frame->timer);
+			del_timer_sync(&frame->timer);
 			list_del(&frame->list);
-			spin_unlock(&flist_lock);
+			spin_unlock_bh(&flist_lock);
 
 			dev_kfree_skb(skb);
 			skb = frame->skb;
@@ -774,7 +770,7 @@ lowpan_process_data(struct sk_buff *skb)
 
 			break;
 		}
-		spin_unlock(&flist_lock);
+		spin_unlock_bh(&flist_lock);
 
 		return kfree_skb(skb), 0;
 	}
@@ -929,7 +925,7 @@ lowpan_process_data(struct sk_buff *skb)
 	return lowpan_skb_deliver(skb, &hdr);
 
 unlock_and_drop:
-	spin_unlock(&flist_lock);
+	spin_unlock_bh(&flist_lock);
 drop:
 	kfree_skb(skb);
 	return -EINVAL;
@@ -1196,19 +1192,9 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
 	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
 	struct net_device *real_dev = lowpan_dev->real_dev;
 	struct lowpan_dev_record *entry, *tmp;
-	struct lowpan_fragment *frame, *tframe;
 
 	ASSERT_RTNL();
 
-	spin_lock(&flist_lock);
-	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
-		del_timer(&frame->timer);
-		list_del(&frame->list);
-		dev_kfree_skb(frame->skb);
-		kfree(frame);
-	}
-	spin_unlock(&flist_lock);
-
 	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
 	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
 		if (entry->ldev == dev) {
@@ -1264,9 +1250,24 @@ out:
 
 static void __exit lowpan_cleanup_module(void)
 {
+	struct lowpan_fragment *frame, *tframe;
+
 	lowpan_netlink_fini();
 
 	dev_remove_pack(&lowpan_packet_type);
+
+	/* Now 6lowpan packet_type is removed, so no new fragments are
+	 * expected on RX, therefore that's the time to clean incomplete
+	 * fragments.
+	 */
+	spin_lock_bh(&flist_lock);
+	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+		del_timer_sync(&frame->timer);
+		list_del(&frame->list);
+		dev_kfree_skb(frame->skb);
+		kfree(frame);
+	}
+	spin_unlock_bh(&flist_lock);
 }
 
 module_init(lowpan_init_module);