Commit 2ad8b1ef authored by Alan D. Brunelle, committed by Jens Axboe
Browse files

Add UNPLUG traces to all appropriate places



Added blk_unplug interface, allowing all invocations of unplugs to result
in a generated blktrace UNPLUG.

Signed-off-by: Alan D. Brunelle <Alan.Brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent d85532ed
Loading
Loading
Loading
Loading
+15 −9
Original line number Diff line number Diff line
@@ -1621,15 +1621,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
{
	struct request_queue *q = bdi->unplug_io_data;

	/*
	 * devices don't necessarily have an ->unplug_fn defined
	 */
	if (q->unplug_fn) {
		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
					q->rq.count[READ] + q->rq.count[WRITE]);

		q->unplug_fn(q);
	}
	blk_unplug(q);
}

static void blk_unplug_work(struct work_struct *work)
@@ -1653,6 +1645,20 @@ static void blk_unplug_timeout(unsigned long data)
	kblockd_schedule_work(&q->unplug_work);
}

/*
 * blk_unplug - unplug a request queue and emit a blktrace UNPLUG event
 * @q: the request queue to unplug
 *
 * Central entry point for unplugging a queue.  If the queue provides an
 * ->unplug_fn, a BLK_TA_UNPLUG_IO trace event is recorded first, carrying
 * the current number of allocated requests (reads + writes) as its
 * payload, and then the queue's unplug function is invoked.  Exported so
 * callers (md, dm, bitmap code per this commit) no longer open-code the
 * ->unplug_fn check and miss the trace point.
 */
void blk_unplug(struct request_queue *q)
{
	/*
	 * devices don't necessarily have an ->unplug_fn defined
	 */
	if (q->unplug_fn) {
		/* trace before unplugging; pdu = total outstanding requests */
		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
					q->rq.count[READ] + q->rq.count[WRITE]);

		q->unplug_fn(q);
	}
}
EXPORT_SYMBOL(blk_unplug);

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
+1 −2
Original line number Diff line number Diff line
@@ -1207,8 +1207,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
			prepare_to_wait(&bitmap->overflow_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&bitmap->lock);
			bitmap->mddev->queue
				->unplug_fn(bitmap->mddev->queue);
			blk_unplug(bitmap->mddev->queue);
			schedule();
			finish_wait(&bitmap->overflow_wait, &__wait);
			continue;
+1 −2
Original line number Diff line number Diff line
@@ -1000,8 +1000,7 @@ void dm_table_unplug_all(struct dm_table *t)
		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
		struct request_queue *q = bdev_get_queue(dd->bdev);

		if (q->unplug_fn)
			q->unplug_fn(q);
		blk_unplug(q);
	}
}

+1 −2
Original line number Diff line number Diff line
@@ -87,8 +87,7 @@ static void linear_unplug(struct request_queue *q)

	for (i=0; i < mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
		if (r_queue->unplug_fn)
			r_queue->unplug_fn(r_queue);
		blk_unplug(r_queue);
	}
}

+2 −2
Original line number Diff line number Diff line
@@ -5445,7 +5445,7 @@ void md_do_sync(mddev_t *mddev)
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		mddev->queue->unplug_fn(mddev->queue);
		blk_unplug(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -5464,7 +5464,7 @@ void md_do_sync(mddev_t *mddev)
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	mddev->queue->unplug_fn(mddev->queue);
	blk_unplug(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

Loading