Commit 724c15a4 authored by Vivek Goyal, committed by Miklos Szeredi
Browse files

virtiofs: Use completions while waiting for queue to be drained



While we wait for the queue to finish draining, use completions instead of
usleep_range(). This is a better way of waiting for the event.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
parent 1efcf39e
Loading
Loading
Loading
Loading
+29 −10
Original line number Diff line number Diff line
@@ -35,6 +35,7 @@ struct virtio_fs_vq {
	struct fuse_dev *fud;
	bool connected;
	long in_flight;
	struct completion in_flight_zero; /* No inflight requests */
	char name[24];
} ____cacheline_aligned_in_smp;

@@ -85,6 +86,8 @@ static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight <= 0);
	fsvq->in_flight--;
	if (!fsvq->in_flight)
		complete(&fsvq->in_flight_zero);
}

static void release_virtio_fs_obj(struct kref *ref)
@@ -115,22 +118,23 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
	WARN_ON(fsvq->in_flight < 0);

	/* Wait for in flight requests to finish.*/
-	while (1) {
-		spin_lock(&fsvq->lock);
-		if (!fsvq->in_flight) {
-			spin_unlock(&fsvq->lock);
-			break;
-		}
-		spin_unlock(&fsvq->lock);
-		/* TODO use completion instead of timeout */
-		usleep_range(1000, 2000);
-	}
+	spin_lock(&fsvq->lock);
+	if (fsvq->in_flight) {
+		/* We are holding virtio_fs_mutex. There should not be any
+		 * waiters waiting for completion.
+		 */
+		reinit_completion(&fsvq->in_flight_zero);
+		spin_unlock(&fsvq->lock);
+		wait_for_completion(&fsvq->in_flight_zero);
+	} else {
+		spin_unlock(&fsvq->lock);
+	}

	flush_work(&fsvq->done_work);
	flush_delayed_work(&fsvq->dispatch_work);
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
	int i;
@@ -141,6 +145,19 @@ static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
	}
}

static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
	/* Provides mutual exclusion between ->remove and ->kill_sb
	 * paths. We don't want both of these draining queue at the
	 * same time. Current completion logic reinits completion
	 * and that means there should not be any other thread
	 * doing reinit or waiting for completion already.
	 */
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_drain_all_queues_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}

static void virtio_fs_start_all_queues(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq;
@@ -581,6 +598,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
	INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].end_reqs);
	INIT_DELAYED_WORK(&fs->vqs[VQ_HIPRIO].dispatch_work,
			virtio_fs_hiprio_dispatch_work);
	init_completion(&fs->vqs[VQ_HIPRIO].in_flight_zero);
	spin_lock_init(&fs->vqs[VQ_HIPRIO].lock);

	/* Initialize the requests virtqueues */
@@ -591,6 +609,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
				  virtio_fs_request_dispatch_work);
		INIT_LIST_HEAD(&fs->vqs[i].queued_reqs);
		INIT_LIST_HEAD(&fs->vqs[i].end_reqs);
		init_completion(&fs->vqs[i].in_flight_zero);
		snprintf(fs->vqs[i].name, sizeof(fs->vqs[i].name),
			 "requests.%u", i - VQ_REQUEST);
		callbacks[i] = virtio_fs_vq_done;
@@ -684,7 +703,7 @@ static void virtio_fs_remove(struct virtio_device *vdev)
	/* This device is going away. No one should get new reference */
	list_del_init(&fs->list);
	virtio_fs_stop_all_queues(fs);
	virtio_fs_drain_all_queues(fs);
	virtio_fs_drain_all_queues_locked(fs);
	vdev->config->reset(vdev);
	virtio_fs_cleanup_vqs(vdev, fs);