Commit d0fa9250 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull Hyper-V updates from Sasha Levin:

 - Most of the commits here are work to enable host-initiated
   hibernation support by Dexuan Cui.

 - Fix for a warning shown when host sends non-aligned balloon requests
   by Tianyu Lan.

* tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  hv_utils: Add the support of hibernation
  hv_utils: Support host-initiated hibernation request
  hv_utils: Support host-initiated restart request
  Tools: hv: Reopen the devices if read() or write() returns errors
  video: hyperv: hyperv_fb: Use physical memory for fb on HyperV Gen 1 VMs.
  Drivers: hv: vmbus: Ignore CHANNELMSG_TL_CONNECT_RESULT(23)
  video: hyperv_fb: Fix hibernation for the deferred IO feature
  Input: hyperv-keyboard: Add the support of hibernation
  hv_balloon: Balloon up according to request page number
parents 46d6b7be 54e19d34
Loading
Loading
Loading
Loading
+7 −14
Original line number Diff line number Diff line
@@ -1351,6 +1351,8 @@ channel_message_table[CHANNELMSG_COUNT] = {
	{ CHANNELMSG_19,			0, NULL },
	{ CHANNELMSG_20,			0, NULL },
	{ CHANNELMSG_TL_CONNECT_REQUEST,	0, NULL },
	{ CHANNELMSG_22,			0, NULL },
	{ CHANNELMSG_TL_CONNECT_RESULT,		0, NULL },
};

/*
@@ -1362,25 +1364,16 @@ void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;

	trace_vmbus_on_message(hdr);

	/*
	 * vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go
	 * out of bound and the message_handler pointer can not be NULL.
	 * The old local bounds check and if/else NULL-handler guard were
	 * therefore dead code and have been dropped, together with the
	 * now-unused 'size' local.
	 */
	channel_message_table[hdr->msgtype].message_handler(hdr);
}

/*
+3 −10
Original line number Diff line number Diff line
@@ -1217,10 +1217,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
	unsigned int i, j;
	struct page *pg;

	if (num_pages < alloc_unit)
		return 0;

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
	for (i = 0; i < num_pages / alloc_unit; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
			HV_HYP_PAGE_SIZE)
			return i * alloc_unit;
@@ -1258,7 +1255,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,

	}

	return num_pages;
	return i * alloc_unit;
}

static void balloon_up(struct work_struct *dummy)
@@ -1273,9 +1270,6 @@ static void balloon_up(struct work_struct *dummy)
	long avail_pages;
	unsigned long floor;

	/* The host balloons pages in 2M granularity. */
	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

	/*
	 * We will attempt 2M allocations. However, if we fail to
	 * allocate 2M chunks, we will go back to PAGE_SIZE allocations.
@@ -1285,14 +1279,13 @@ static void balloon_up(struct work_struct *dummy)
	avail_pages = si_mem_available();
	floor = compute_balloon_floor();

	/* Refuse to balloon below the floor, keep the 2M granularity. */
	/* Refuse to balloon below the floor. */
	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
		pr_warn("Balloon request will be partially fulfilled. %s\n",
			avail_pages < num_pages ? "Not enough memory." :
			"Balloon floor reached.");

		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
		num_pages -= num_pages % PAGES_IN_2M;
	}

	while (!done) {
+53 −1
Original line number Diff line number Diff line
@@ -346,9 +346,61 @@ int hv_fcopy_init(struct hv_util_service *srv)
	return 0;
}

/*
 * Cancel any outstanding fcopy work items, waiting for in-flight
 * instances to finish (the *_sync variants block until completion).
 */
static void hv_fcopy_cancel_work(void)
{
	cancel_delayed_work_sync(&fcopy_timeout_work);
	cancel_work_sync(&fcopy_send_work);
}

/*
 * Quiesce the fcopy util channel before host-initiated hibernation.
 *
 * Returns 0 on success, or -ENOMEM if the fake CANCEL_FCOPY message can
 * not be allocated.  On success the channel's callback tasklet is left
 * disabled; hv_fcopy_pre_resume() re-enables it.
 */
int hv_fcopy_pre_suspend(void)
{
	struct vmbus_channel *channel = fcopy_transaction.recv_channel;
	struct hv_fcopy_hdr *fcopy_msg;

	/*
	 * Fake a CANCEL_FCOPY message for the user space daemon in case the
	 * daemon is in the middle of copying some file. It doesn't matter if
	 * there is already a message pending to be delivered to the user
	 * space since we force fcopy_transaction.state to be HVUTIL_READY, so
	 * the user space daemon's write() will fail with EINVAL (see
	 * fcopy_on_msg()), and the daemon will reset the device by closing
	 * and re-opening it.
	 */
	fcopy_msg = kzalloc(sizeof(*fcopy_msg), GFP_KERNEL);
	if (!fcopy_msg)
		return -ENOMEM;

	/* Keep the channel callback tasklet from running while we quiesce. */
	tasklet_disable(&channel->callback_event);

	fcopy_msg->operation = CANCEL_FCOPY;

	hv_fcopy_cancel_work();

	/* We don't care about the return value. */
	hvutil_transport_send(hvt, fcopy_msg, sizeof(*fcopy_msg), NULL);

	kfree(fcopy_msg);

	fcopy_transaction.state = HVUTIL_READY;

	/* tasklet_enable() will be called in hv_fcopy_pre_resume(). */
	return 0;
}

/*
 * Undo hv_fcopy_pre_suspend(): re-enable the channel callback tasklet
 * that was left disabled across hibernation.  Always returns 0.
 */
int hv_fcopy_pre_resume(void)
{
	tasklet_enable(&fcopy_transaction.recv_channel->callback_event);
	return 0;
}

/*
 * Tear down the fcopy util channel: mark the device dying so no new
 * transactions start, cancel outstanding work, and destroy the
 * user-space transport.
 */
void hv_fcopy_deinit(void)
{
	fcopy_transaction.state = HVUTIL_DEVICE_DYING;

	/*
	 * hv_fcopy_cancel_work() already cancels fcopy_timeout_work, so the
	 * explicit cancel_delayed_work_sync(&fcopy_timeout_work) that used
	 * to precede it was redundant and has been removed.
	 */
	hv_fcopy_cancel_work();

	hvutil_transport_destroy(hvt);
}
+41 −2
Original line number Diff line number Diff line
@@ -758,11 +758,50 @@ hv_kvp_init(struct hv_util_service *srv)
	return 0;
}

void hv_kvp_deinit(void)
static void hv_kvp_cancel_work(void)
{
	kvp_transaction.state = HVUTIL_DEVICE_DYING;
	cancel_delayed_work_sync(&kvp_host_handshake_work);
	cancel_delayed_work_sync(&kvp_timeout_work);
	cancel_work_sync(&kvp_sendkey_work);
}

/*
 * Quiesce the KVP util channel before host-initiated hibernation.
 * Always returns 0.  The channel's callback tasklet is left disabled;
 * hv_kvp_pre_resume() re-enables it.
 */
int hv_kvp_pre_suspend(void)
{
	struct vmbus_channel *channel = kvp_transaction.recv_channel;

	tasklet_disable(&channel->callback_event);

	/*
	 * If there is a pending transaction, it's unnecessary to tell the host
	 * that the transaction will fail, because that is implied when
	 * util_suspend() calls vmbus_close() later.
	 */
	hv_kvp_cancel_work();

	/*
	 * Force the state to READY to handle the ICMSGTYPE_NEGOTIATE message
	 * later. The user space daemon may go out of order and its write()
	 * may fail with EINVAL: this doesn't matter since the daemon will
	 * reset the device by closing and re-opening it.
	 */
	kvp_transaction.state = HVUTIL_READY;
	return 0;
}

/*
 * Undo hv_kvp_pre_suspend(): re-enable the channel callback tasklet
 * that was left disabled across hibernation.  Always returns 0.
 */
int hv_kvp_pre_resume(void)
{
	tasklet_enable(&kvp_transaction.recv_channel->callback_event);
	return 0;
}

/*
 * Tear down the KVP util channel: mark the device dying so no new
 * transactions start, cancel outstanding work, and destroy the
 * user-space transport.
 */
void hv_kvp_deinit(void)
{
	kvp_transaction.state = HVUTIL_DEVICE_DYING;

	hv_kvp_cancel_work();

	hvutil_transport_destroy(hvt);
}
+53 −2
Original line number Diff line number Diff line
@@ -379,10 +379,61 @@ hv_vss_init(struct hv_util_service *srv)
	return 0;
}

void hv_vss_deinit(void)
static void hv_vss_cancel_work(void)
{
	vss_transaction.state = HVUTIL_DEVICE_DYING;
	cancel_delayed_work_sync(&vss_timeout_work);
	cancel_work_sync(&vss_handle_request_work);
}

/*
 * Quiesce the VSS util channel before host-initiated hibernation.
 *
 * Returns 0 on success, or -ENOMEM if the fake THAW message can not be
 * allocated.  On success the channel's callback tasklet is left
 * disabled; hv_vss_pre_resume() re-enables it.
 */
int hv_vss_pre_suspend(void)
{
	struct vmbus_channel *channel = vss_transaction.recv_channel;
	struct hv_vss_msg *vss_msg;

	/*
	 * Fake a THAW message for the user space daemon in case the daemon
	 * has frozen the file systems. It doesn't matter if there is already
	 * a message pending to be delivered to the user space since we force
	 * vss_transaction.state to be HVUTIL_READY, so the user space daemon's
	 * write() will fail with EINVAL (see vss_on_msg()), and the daemon
	 * will reset the device by closing and re-opening it.
	 */
	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
	if (!vss_msg)
		return -ENOMEM;

	/* Keep the channel callback tasklet from running while we quiesce. */
	tasklet_disable(&channel->callback_event);

	vss_msg->vss_hdr.operation = VSS_OP_THAW;

	/* Cancel any possible pending work. */
	hv_vss_cancel_work();

	/* We don't care about the return value. */
	hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);

	kfree(vss_msg);

	vss_transaction.state = HVUTIL_READY;

	/* tasklet_enable() will be called in hv_vss_pre_resume(). */
	return 0;
}

/*
 * Undo hv_vss_pre_suspend(): re-enable the channel callback tasklet
 * that was left disabled across hibernation.  Always returns 0.
 */
int hv_vss_pre_resume(void)
{
	tasklet_enable(&vss_transaction.recv_channel->callback_event);
	return 0;
}

/*
 * Tear down the VSS util channel: mark the device dying so no new
 * transactions start, cancel outstanding work, and destroy the
 * user-space transport.
 */
void hv_vss_deinit(void)
{
	vss_transaction.state = HVUTIL_DEVICE_DYING;

	hv_vss_cancel_work();

	hvutil_transport_destroy(hvt);
}
Loading