Commit 5529eaf6 authored by Stephen Hemminger, committed by Greg Kroah-Hartman

vmbus: remove conditional locking of vmbus_write



All current users of the vmbus write functions set the acquire_lock flag,
so there is no need for the locking to be optional. This also fixes a
sparse warning, since sparse does not like a function that takes a lock
only conditionally.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b71e3282
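
For context, this is the pattern sparse complains about: when the lock and
unlock calls are guarded by a runtime flag, the tool cannot prove that every
path releases what it acquired. A minimal sketch with hypothetical names
(struct ring and both functions are illustrative, not from this patch):

	/* Before: sparse cannot statically pair the lock/unlock calls. */
	static void ring_write_conditional(struct ring *r, bool lock)
	{
		unsigned long flags = 0;

		if (lock)
			spin_lock_irqsave(&r->lock, flags);
		/* ... copy packet data into the ring ... */
		if (lock)
			spin_unlock_irqrestore(&r->lock, flags);
	}

	/* After: the lock is taken on every call, so the pairing is static. */
	static void ring_write(struct ring *r)
	{
		unsigned long flags;

		spin_lock_irqsave(&r->lock, flags);
		/* ... copy packet data into the ring ... */
		spin_unlock_irqrestore(&r->lock, flags);
	}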
drivers/hv/channel.c  +4 −9
@@ -651,7 +651,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 	int num_vecs = ((bufferlen != 0) ? 3 : 1);

@@ -670,7 +669,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

-	return hv_ringbuffer_write(channel, bufferlist, num_vecs, lock);
+	return hv_ringbuffer_write(channel, bufferlist, num_vecs);
 }
 EXPORT_SYMBOL(vmbus_sendpacket_ctl);

@@ -716,12 +715,10 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;

 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;

-
 	/*
 	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
 	 * largest size we support
@@ -753,7 +750,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

-	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);

@@ -789,7 +786,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;

 	packetlen = desc_size + bufferlen;
 	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -809,7 +805,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

-	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

@@ -827,7 +823,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);

@@ -866,7 +861,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

-	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
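
All four senders above end the same way: a three-element kvec (descriptor,
caller payload, zero padding out to an 8-byte boundary) is handed to
hv_ringbuffer_write(), now without the lock argument. A condensed sketch of
that shared tail; send_packet_sketch() is a hypothetical helper, not part of
the patch:

	static int send_packet_sketch(struct vmbus_channel *channel,
				      struct vmpacket_descriptor *desc,
				      void *buffer, u32 bufferlen)
	{
		u32 packetlen = sizeof(*desc) + bufferlen;
		u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
		struct kvec bufferlist[3];
		u64 aligned_data = 0;

		bufferlist[0].iov_base = desc;		/* packet descriptor */
		bufferlist[0].iov_len = sizeof(*desc);
		bufferlist[1].iov_base = buffer;	/* caller's payload */
		bufferlist[1].iov_len = bufferlen;
		bufferlist[2].iov_base = &aligned_data;	/* zero pad to 8 bytes */
		bufferlist[2].iov_len = packetlen_aligned - packetlen;

		/* Locking now happens inside hv_ringbuffer_write(). */
		return hv_ringbuffer_write(channel, bufferlist, 3);
	}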

drivers/hv/channel_mgmt.c  +0 −1
@@ -332,7 +332,6 @@ static struct vmbus_channel *alloc_channel(void)
 	if (!channel)
 		return NULL;

-	channel->acquire_ring_lock = true;
 	spin_lock_init(&channel->inbound_lock);
 	spin_lock_init(&channel->lock);

drivers/hv/hyperv_vmbus.h  +1 −2
@@ -283,8 +283,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

 int hv_ringbuffer_write(struct vmbus_channel *channel,
-			struct kvec *kv_list,
-			u32 kv_count, bool lock);
+			struct kvec *kv_list, u32 kv_count);

 int hv_ringbuffer_read(struct vmbus_channel *channel,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
drivers/hv/ring_buffer.c  +4 −7
@@ -284,7 +284,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)

 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct vmbus_channel *channel,
-			struct kvec *kv_list, u32 kv_count, bool lock)
+			struct kvec *kv_list, u32 kv_count)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -304,7 +304,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,

 	totalbytes_towrite += sizeof(u64);

-	if (lock)
-		spin_lock_irqsave(&outring_info->ring_lock, flags);
+	spin_lock_irqsave(&outring_info->ring_lock, flags);

 	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
@@ -315,7 +314,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	 * is empty since the read index == write index.
 	 */
 	if (bytes_avail_towrite <= totalbytes_towrite) {
-		if (lock)
-			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 		return -EAGAIN;
 	}
@@ -347,7 +345,6 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	hv_set_next_write_location(outring_info, next_write_location);


-	if (lock)
-		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

 	hv_signal_on_write(old_write, channel);
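
With the flag gone, every write path takes the ring lock. A condensed sketch
of the resulting flow in hv_ringbuffer_write(); kv_total_len() is invented
here for brevity, and the copy loop is reduced to a comment:

	static u32 kv_total_len(const struct kvec *kv_list, u32 kv_count)
	{
		u32 i, len = 0;

		for (i = 0; i < kv_count; i++)
			len += kv_list[i].iov_len;
		return len + sizeof(u64);	/* trailing write-index marker */
	}

	int hv_ringbuffer_write_sketch(struct vmbus_channel *channel,
				       struct kvec *kv_list, u32 kv_count)
	{
		struct hv_ring_buffer_info *outring_info = &channel->outbound;
		unsigned long flags;

		spin_lock_irqsave(&outring_info->ring_lock, flags);	/* unconditional */

		if (hv_get_bytes_to_write(outring_info) <=
		    kv_total_len(kv_list, kv_count)) {
			/* Ring full: drop the lock; the caller retries on -EAGAIN. */
			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
			return -EAGAIN;
		}

		/* ... copy each kvec into the ring, then publish the write index ... */

		spin_unlock_irqrestore(&outring_info->ring_lock, flags);

		/* The host is signalled after the lock is dropped, as above. */
		return 0;
	}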
include/linux/hyperv.h  +0 −15
@@ -845,16 +845,6 @@ struct vmbus_channel {
 	 * link up channels based on their CPU affinity.
 	 */
 	struct list_head percpu_list;
-	/*
-	 * On the channel send side, many of the VMBUS
-	 * device drivers explicity serialize access to the
-	 * outgoing ring buffer. Give more control to the
-	 * VMBUS device drivers in terms how to serialize
-	 * accesss to the outgoing ring buffer.
-	 * The default behavior will be to aquire the
-	 * ring lock to preserve the current behavior.
-	 */
-	bool acquire_ring_lock;
 	/*
 	 * For performance critical channels (storage, networking
 	 * etc,), Hyper-V has a mechanism to enhance the throughput
@@ -895,11 +885,6 @@ struct vmbus_channel {

 };

-static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
-{
-	c->acquire_ring_lock = state;
-}
-
 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
 {
 	return !!(c->offermsg.offer.chn_flags &