Commit 4907a43d authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull Hyper-V updates from Wei Liu:

 - a series from Boqun Feng to support page size larger than 4K

 - a few miscellaneous clean-ups

* tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  hv: clocksource: Add notrace attribute to read_hv_sched_clock_*() functions
  x86/hyperv: Remove aliases with X64 in their name
  PCI: hv: Document missing hv_pci_protocol_negotiation() parameter
  scsi: storvsc: Support PAGE_SIZE larger than 4K
  Driver: hv: util: Use VMBUS_RING_SIZE() for ringbuffer sizes
  HID: hyperv: Use VMBUS_RING_SIZE() for ringbuffer sizes
  Input: hyperv-keyboard: Use VMBUS_RING_SIZE() for ringbuffer sizes
  hv_netvsc: Use HV_HYP_PAGE_SIZE for Hyper-V communication
  hv: hyperv.h: Introduce some hvpfn helper functions
  Drivers: hv: vmbus: Move virt_to_hvpfn() to hyperv header
  Drivers: hv: Use HV_HYP_PAGE in hv_synic_enable_regs()
  Drivers: hv: vmbus: Introduce types of GPADL
  Drivers: hv: vmbus: Move __vmbus_open()
  Drivers: hv: vmbus: Always use HV_HYP_PAGE_SIZE for gpadl
  drivers: hv: remove cast from hyperv_die_event
parents da9803df 1f3aed01
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -341,7 +341,7 @@ static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
	return read_hv_clock_tsc();
}

static u64 read_hv_sched_clock_tsc(void)
static u64 notrace read_hv_sched_clock_tsc(void)
{
	return (read_hv_clock_tsc() - hv_sched_clock_offset) *
		(NSEC_PER_SEC / HV_CLOCK_HZ);
@@ -404,7 +404,7 @@ static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
	return read_hv_clock_msr();
}

static u64 read_hv_sched_clock_msr(void)
static u64 notrace read_hv_sched_clock_msr(void)
{
	return (read_hv_clock_msr() - hv_sched_clock_offset) *
		(NSEC_PER_SEC / HV_CLOCK_HZ);
+2 −2
Original line number Diff line number Diff line
@@ -104,8 +104,8 @@ struct synthhid_input_report {

#pragma pack(pop)

#define INPUTVSC_SEND_RING_BUFFER_SIZE		(40 * 1024)
#define INPUTVSC_RECV_RING_BUFFER_SIZE		(40 * 1024)
#define INPUTVSC_SEND_RING_BUFFER_SIZE	VMBUS_RING_SIZE(36 * 1024)
#define INPUTVSC_RECV_RING_BUFFER_SIZE	VMBUS_RING_SIZE(36 * 1024)


enum pipe_prot_msg_type {
+283 −178
Original line number Diff line number Diff line
@@ -22,20 +22,97 @@

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
/*
 * hv_gpadl_size - Return the real size of a gpadl, the size that Hyper-V uses
 *
 * For BUFFER gpadl, Hyper-V uses the exact same size as the guest does.
 *
 * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the header
 * (because of the alignment requirement), however, the hypervisor only
 * uses the first HV_HYP_PAGE_SIZE as the header, therefore leaving a
 * (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap. And since there are two rings in a
 * ringbuffer, the total size for a RING gpadl that Hyper-V uses is the
 * total size that the guest uses minus twice of the gap size.
 */
static inline u32 hv_gpadl_size(enum hv_gpadl_type type, u32 size)
{
	if (type == HV_GPADL_BUFFER)
		return size;

	if (type == HV_GPADL_RING) {
		/* A ringbuffer must be a whole number of guest pages. */
		BUG_ON(size % PAGE_SIZE);
		/*
		 * A RING gpadl covers two ring buffers as a unit, and each
		 * one wastes (PAGE_SIZE - HV_HYP_PAGE_SIZE) bytes of its
		 * guest-size header page: effectively subtract two guest
		 * pages and add back two Hyper-V pages.
		 */
		return size - 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
	}

	/* Unknown gpadl type: programming error. */
	BUG();
	return 0;
}

/*
 * hv_ring_gpadl_send_hvpgoffset - Calculate the send offset (in unit of
 *                                 HV_HYP_PAGE) in a ring gpadl based on the
 *                                 offset in the guest
 *
 * @offset: the offset (in bytes) where the send ringbuffer starts in the
 *          virtual address space of the guest
 */
static inline u32 hv_ring_gpadl_send_hvpgoffset(u32 offset)
{
	/*
	 * For RING gpadl, in each ring, the guest uses one PAGE_SIZE as the
	 * header (because of the alignment requirement), however, the
	 * hypervisor only uses the first HV_HYP_PAGE_SIZE as the header,
	 * therefore leaving a (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap.
	 *
	 * And to calculate the effective send offset in gpadl, we need to
	 * subtract this gap.
	 */
	return (offset - (PAGE_SIZE - HV_HYP_PAGE_SIZE)) >> HV_HYP_PAGE_SHIFT;
}

/*
 * hv_gpadl_hvpfn - Return the Hyper-V page PFN of the @i th Hyper-V page in
 *                  the gpadl
 *
 * @type: the type of the gpadl
 * @kbuffer: the pointer to the gpadl in the guest
 * @size: the total size (in bytes) of the gpadl
 * @send_offset: the offset (in bytes) where the send ringbuffer starts in the
 *               virtual address space of the guest
 * @i: the index
 */
static inline u64 hv_gpadl_hvpfn(enum hv_gpadl_type type, void *kbuffer,
				 u32 size, u32 send_offset, int i)
{
	int send_idx = hv_ring_gpadl_send_hvpgoffset(send_offset);
	unsigned long delta = 0UL;

	switch (type) {
	case HV_GPADL_BUFFER:
		/* BUFFER gpadl: guest and hypervisor layouts match, no shift. */
		break;
	case HV_GPADL_RING:
		/*
		 * Skip the (PAGE_SIZE - HV_HYP_PAGE_SIZE) gap at the head of
		 * each ring: no gap before the very first page, one gap once
		 * inside the send ring, and two gaps past the send ring (i.e.
		 * inside the recv ring).
		 */
		if (i == 0)
			delta = 0;
		else if (i <= send_idx)
			delta = PAGE_SIZE - HV_HYP_PAGE_SIZE;
		else
			delta = 2 * (PAGE_SIZE - HV_HYP_PAGE_SIZE);
		break;
	default:
		BUG();
		break;
	}

	return virt_to_hvpfn(kbuffer + delta + (HV_HYP_PAGE_SIZE * i));
}

/*
@@ -112,160 +189,6 @@ int vmbus_alloc_ring(struct vmbus_channel *newchannel,
}
EXPORT_SYMBOL_GPL(vmbus_alloc_ring);

/*
 * __vmbus_open - Initialize the channel's ring buffers, establish their
 * gpadl, and send CHANNELMSG_OPENCHANNEL to the host, waiting for the
 * host's response.
 *
 * @newchannel: channel to open; must be in CHANNEL_OPEN_STATE with its ring
 *              buffer pages already allocated
 * @userdata: optional data copied into the open message (may be NULL)
 * @userdatalen: length of @userdata, at most MAX_USER_DEFINED_BYTES
 * @onchannelcallback: callback invoked when the host signals the channel
 * @context: argument passed to @onchannelcallback
 *
 * Returns 0 on success; on failure all partially set up state (gpadl, ring
 * buffers, channel state) is rolled back.
 */
static int __vmbus_open(struct vmbus_channel *newchannel,
		       void *userdata, u32 userdatalen,
		       void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *open_msg;
	struct vmbus_channel_msginfo *open_info = NULL;
	struct page *page = newchannel->ringbuffer_page;
	u32 send_pages, recv_pages;
	unsigned long flags;
	int err;

	if (userdatalen > MAX_USER_DEFINED_BYTES)
		return -EINVAL;

	/* The send ring occupies the first ringbuffer_send_offset pages. */
	send_pages = newchannel->ringbuffer_send_offset;
	recv_pages = newchannel->ringbuffer_pagecount - send_pages;

	if (newchannel->state != CHANNEL_OPEN_STATE)
		return -EINVAL;

	newchannel->state = CHANNEL_OPENING_STATE;
	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
	if (err)
		goto error_clean_ring;

	err = hv_ringbuffer_init(&newchannel->inbound,
				 &page[send_pages], recv_pages);
	if (err)
		goto error_clean_ring;

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle = 0;

	err = vmbus_establish_gpadl(newchannel,
				    page_address(newchannel->ringbuffer_page),
				    (send_pages + recv_pages) << PAGE_SHIFT,
				    &newchannel->ringbuffer_gpadlhandle);
	if (err)
		goto error_clean_ring;

	/* Create and init the channel open message */
	open_info = kmalloc(sizeof(*open_info) +
			   sizeof(struct vmbus_channel_open_channel),
			   GFP_KERNEL);
	if (!open_info) {
		err = -ENOMEM;
		goto error_free_gpadl;
	}

	init_completion(&open_info->waitevent);
	open_info->waiting_channel = newchannel;

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
	open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);

	if (userdatalen)
		memcpy(open_msg->userdata, userdata, userdatalen);

	/*
	 * Queue open_info on chn_msg_list so the host's reply can be matched
	 * to it and complete waitevent below.
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&open_info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	/* Abort if the channel was rescinded in the meantime. */
	if (newchannel->rescind) {
		err = -ENODEV;
		goto error_free_info;
	}

	err = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel), true);

	trace_vmbus_open(open_msg, err);

	if (err != 0)
		goto error_clean_msglist;

	wait_for_completion(&open_info->waitevent);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	/* Re-check rescind after waiting for the host's response. */
	if (newchannel->rescind) {
		err = -ENODEV;
		goto error_free_info;
	}

	if (open_info->response.open_result.status) {
		err = -EAGAIN;
		goto error_free_info;
	}

	newchannel->state = CHANNEL_OPENED_STATE;
	kfree(open_info);
	return 0;

error_clean_msglist:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
error_free_info:
	kfree(open_info);
error_free_gpadl:
	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
	newchannel->ringbuffer_gpadlhandle = 0;
error_clean_ring:
	hv_ringbuffer_cleanup(&newchannel->outbound);
	hv_ringbuffer_cleanup(&newchannel->inbound);
	newchannel->state = CHANNEL_OPEN_STATE;
	return err;
}

/*
 * vmbus_connect_ring - Open the channel but reuse the existing ring buffer
 *
 * Unlike vmbus_open(), no ring buffer is allocated here: the channel's
 * previously allocated ring buffer pages are handed to __vmbus_open()
 * as-is, with no user data in the open message.
 */
int vmbus_connect_ring(struct vmbus_channel *newchannel,
		       void (*onchannelcallback)(void *context), void *context)
{
	return  __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
}
EXPORT_SYMBOL_GPL(vmbus_connect_ring);

/*
 * vmbus_open - Open the specified channel.
 *
 * Allocates send/recv ring buffers of the requested sizes, then opens the
 * channel via __vmbus_open(); the rings are freed again if the open fails.
 */
int vmbus_open(struct vmbus_channel *newchannel,
	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
	       void *userdata, u32 userdatalen,
	       void (*onchannelcallback)(void *context), void *context)
{
	int err;

	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
			       recv_ringbuffer_size);
	if (err)
		return err;

	err = __vmbus_open(newchannel, userdata, userdatalen,
			   onchannelcallback, context);
	if (err)
		vmbus_free_ring(newchannel);

	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);

/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
				  const guid_t *shv_host_servie_id)
@@ -317,7 +240,8 @@ EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);
/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
			       u32 size, u32 send_offset,
			       struct vmbus_channel_msginfo **msginfo)
{
	int i;
@@ -330,7 +254,7 @@ static int create_gpadl_header(void *kbuffer, u32 size,

	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	pagecount = size >> PAGE_SHIFT;
	pagecount = hv_gpadl_size(type, size) >> HV_HYP_PAGE_SHIFT;

	/* do we need a gpadl body msg */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
@@ -357,10 +281,10 @@ static int create_gpadl_header(void *kbuffer, u32 size,
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
		for (i = 0; i < pfncount; i++)
			gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
				kbuffer + PAGE_SIZE * i);
			gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
				type, kbuffer, size, send_offset, i);
		*msginfo = msgheader;

		pfnsum = pfncount;
@@ -411,8 +335,8 @@ static int create_gpadl_header(void *kbuffer, u32 size,
			 * so the hypervisor guarantees that this is ok.
			 */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = virt_to_hvpfn(
					kbuffer + PAGE_SIZE * (pfnsum + i));
				gpadl_body->pfn[i] = hv_gpadl_hvpfn(type,
					kbuffer, size, send_offset, pfnsum + i);

			/* add to msg header */
			list_add_tail(&msgbody->msglistentry,
@@ -438,10 +362,10 @@ static int create_gpadl_header(void *kbuffer, u32 size,
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		gpadl_header->range[0].byte_count = hv_gpadl_size(type, size);
		for (i = 0; i < pagecount; i++)
			gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
				kbuffer + PAGE_SIZE * i);
			gpadl_header->range[0].pfn_array[i] = hv_gpadl_hvpfn(
				type, kbuffer, size, send_offset, i);

		*msginfo = msgheader;
	}
@@ -454,15 +378,20 @@ nomem:
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 * __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
 *
 * @channel: a channel
 * @type: the type of the corresponding GPADL, only meaningful for the guest.
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @send_offset: the offset (in bytes) where the send ring buffer starts,
 * 		 should be 0 for BUFFER type gpadl
 * @gpadl_handle: on success, receives the handle of the created GPADL
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			       u32 size, u32 *gpadl_handle)
static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
				   enum hv_gpadl_type type, void *kbuffer,
				   u32 size, u32 send_offset,
				   u32 *gpadl_handle)
{
	struct vmbus_channel_gpadl_header *gpadlmsg;
	struct vmbus_channel_gpadl_body *gpadl_body;
@@ -476,7 +405,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
	next_gpadl_handle =
		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

	ret = create_gpadl_header(kbuffer, size, &msginfo);
	ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
	if (ret)
		return ret;

@@ -557,8 +486,184 @@ cleanup:
	kfree(msginfo);
	return ret;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * Thin wrapper around __vmbus_establish_gpadl() for plain BUFFER-type
 * gpadls; send_offset is only meaningful for RING-type gpadls, hence 0.
 *
 * @channel: a channel
 * @kbuffer: the buffer to describe, from kmalloc or vmalloc
 * @size: size of the buffer in bytes; must be a page-size multiple
 * @gpadl_handle: on success, receives the handle of the created GPADL
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			  u32 size, u32 *gpadl_handle)
{
	return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
				       0U, gpadl_handle);
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);

/*
 * __vmbus_open - Initialize the channel's ring buffers, establish their
 * RING-type gpadl, and send CHANNELMSG_OPENCHANNEL to the host, waiting
 * for the host's response.
 *
 * @newchannel: channel to open; must be in CHANNEL_OPEN_STATE with its ring
 *              buffer pages already allocated
 * @userdata: optional data copied into the open message (may be NULL)
 * @userdatalen: length of @userdata, at most MAX_USER_DEFINED_BYTES
 * @onchannelcallback: callback invoked when the host signals the channel
 * @context: argument passed to @onchannelcallback
 *
 * Returns 0 on success; on failure all partially set up state (gpadl, ring
 * buffers, channel state) is rolled back.
 */
static int __vmbus_open(struct vmbus_channel *newchannel,
		       void *userdata, u32 userdatalen,
		       void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *open_msg;
	struct vmbus_channel_msginfo *open_info = NULL;
	struct page *page = newchannel->ringbuffer_page;
	u32 send_pages, recv_pages;
	unsigned long flags;
	int err;

	if (userdatalen > MAX_USER_DEFINED_BYTES)
		return -EINVAL;

	/* The send ring occupies the first ringbuffer_send_offset pages. */
	send_pages = newchannel->ringbuffer_send_offset;
	recv_pages = newchannel->ringbuffer_pagecount - send_pages;

	if (newchannel->state != CHANNEL_OPEN_STATE)
		return -EINVAL;

	newchannel->state = CHANNEL_OPENING_STATE;
	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
	if (err)
		goto error_clean_ring;

	err = hv_ringbuffer_init(&newchannel->inbound,
				 &page[send_pages], recv_pages);
	if (err)
		goto error_clean_ring;

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle = 0;

	err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
				      page_address(newchannel->ringbuffer_page),
				      (send_pages + recv_pages) << PAGE_SHIFT,
				      newchannel->ringbuffer_send_offset << PAGE_SHIFT,
				      &newchannel->ringbuffer_gpadlhandle);
	if (err)
		goto error_clean_ring;

	/* Create and init the channel open message */
	open_info = kmalloc(sizeof(*open_info) +
			   sizeof(struct vmbus_channel_open_channel),
			   GFP_KERNEL);
	if (!open_info) {
		err = -ENOMEM;
		goto error_free_gpadl;
	}

	init_completion(&open_info->waitevent);
	open_info->waiting_channel = newchannel;

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	/*
	 * The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
	 * the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
	 * here we calculate it into HV_HYP_PAGE.
	 */
	open_msg->downstream_ringbuffer_pageoffset =
		hv_ring_gpadl_send_hvpgoffset(send_pages << PAGE_SHIFT);
	open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);

	if (userdatalen)
		memcpy(open_msg->userdata, userdata, userdatalen);

	/*
	 * Queue open_info on chn_msg_list so the host's reply can be matched
	 * to it and complete waitevent below.
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&open_info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	/* Abort if the channel was rescinded in the meantime. */
	if (newchannel->rescind) {
		err = -ENODEV;
		goto error_free_info;
	}

	err = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel), true);

	trace_vmbus_open(open_msg, err);

	if (err != 0)
		goto error_clean_msglist;

	wait_for_completion(&open_info->waitevent);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	/* Re-check rescind after waiting for the host's response. */
	if (newchannel->rescind) {
		err = -ENODEV;
		goto error_free_info;
	}

	if (open_info->response.open_result.status) {
		err = -EAGAIN;
		goto error_free_info;
	}

	newchannel->state = CHANNEL_OPENED_STATE;
	kfree(open_info);
	return 0;

error_clean_msglist:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
error_free_info:
	kfree(open_info);
error_free_gpadl:
	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
	newchannel->ringbuffer_gpadlhandle = 0;
error_clean_ring:
	hv_ringbuffer_cleanup(&newchannel->outbound);
	hv_ringbuffer_cleanup(&newchannel->inbound);
	newchannel->state = CHANNEL_OPEN_STATE;
	return err;
}

/*
 * vmbus_connect_ring - Open the channel but reuse the existing ring buffer
 *
 * Unlike vmbus_open(), no ring buffer is allocated here: the channel's
 * previously allocated ring buffer pages are handed to __vmbus_open()
 * as-is, with no user data in the open message.
 */
int vmbus_connect_ring(struct vmbus_channel *newchannel,
		       void (*onchannelcallback)(void *context), void *context)
{
	return  __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
}
EXPORT_SYMBOL_GPL(vmbus_connect_ring);

/*
 * vmbus_open - Open the specified channel.
 *
 * Allocates send/recv ring buffers of the requested sizes, then opens the
 * channel via __vmbus_open(); the rings are freed again if the open fails.
 */
int vmbus_open(struct vmbus_channel *newchannel,
	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
	       void *userdata, u32 userdatalen,
	       void (*onchannelcallback)(void *context), void *context)
{
	int err;

	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
			       recv_ringbuffer_size);
	if (err)
		return err;

	err = __vmbus_open(newchannel, userdata, userdatalen,
			   onchannelcallback, context);
	if (err)
		vmbus_free_ring(newchannel);

	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);

/*
 * vmbus_teardown_gpadl -Teardown the specified GPADL handle
 */
+2 −2
Original line number Diff line number Diff line
@@ -165,7 +165,7 @@ void hv_synic_enable_regs(unsigned int cpu)
	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
		>> PAGE_SHIFT;
		>> HV_HYP_PAGE_SHIFT;

	hv_set_simp(simp.as_uint64);

@@ -173,7 +173,7 @@ void hv_synic_enable_regs(unsigned int cpu)
	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
		>> PAGE_SHIFT;
		>> HV_HYP_PAGE_SHIFT;

	hv_set_siefp(siefp.as_uint64);

+7 −4
Original line number Diff line number Diff line
@@ -500,6 +500,9 @@ static void heartbeat_onchannelcallback(void *context)
	}
}

#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)

static int util_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
@@ -530,8 +533,8 @@ static int util_probe(struct hv_device *dev,

	hv_set_drvdata(dev, srv);

	ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
			 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
			 dev->channel);
	if (ret)
		goto error;
@@ -590,8 +593,8 @@ static int util_resume(struct hv_device *dev)
			return ret;
	}

	ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
			 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb,
	ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
			 HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
			 dev->channel);
	return ret;
}
Loading