Commit 4a8c31a1 authored by Dongli Zhang, committed by Konrad Rzeszutek Wilk
Browse files

xen/blkback: rework connect_ring() to avoid inconsistent xenstore...


xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront

The xenstore 'ring-page-order' is used globally for each blkback queue and
therefore should be read from xenstore only once. However, it is obtained
in read_per_ring_refs() which might be called multiple times during the
initialization of each blkback queue.

If the blkfront is malicious and sets 'ring-page-order' to a different
value each time before blkback reads it, this may end up triggering the
"WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in
xen_blkif_disconnect() when the frontend is destroyed.

This patch reworks connect_ring() to read xenstore 'ring-page-order' only
once.

Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 5b8e432d
Loading
Loading
Loading
Loading
+43 −29
Original line number Diff line number Diff line
@@ -926,7 +926,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
	int err, i, j;
	struct xen_blkif *blkif = ring->blkif;
	struct xenbus_device *dev = blkif->be->dev;
	unsigned int ring_page_order, nr_grefs, evtchn;
	unsigned int nr_grefs, evtchn;

	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
			  &evtchn);
@@ -936,43 +936,42 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
		return err;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
			  &ring_page_order);
	if (err != 1) {
		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]);
		if (err != 1) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
			return err;
		}
		nr_grefs = 1;
	} else {
		unsigned int i;
	nr_grefs = blkif->nr_ring_pages;

		if (ring_page_order > xen_blkif_max_ring_order) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
					 dir, ring_page_order,
					 xen_blkif_max_ring_order);
			return err;
	if (unlikely(!nr_grefs)) {
		WARN_ON(true);
		return -EINVAL;
	}

		nr_grefs = 1 << ring_page_order;
	for (i = 0; i < nr_grefs; i++) {
		char ring_ref_name[RINGREF_NAME_LEN];

		snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
		err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
				   "%u", &ring_ref[i]);

		if (err != 1) {
			if (nr_grefs == 1)
				break;

			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/%s",
					 dir, ring_ref_name);
			return err;
		}
	}

	if (err != 1) {
		WARN_ON(nr_grefs != 1);

		err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
				   &ring_ref[0]);
		if (err != 1) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
			return err;
		}
	}
	blkif->nr_ring_pages = nr_grefs;

	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -1031,6 +1030,7 @@ static int connect_ring(struct backend_info *be)
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
	unsigned int requested_num_queues = 0;
	unsigned int ring_page_order;

	pr_debug("%s %s\n", __func__, dev->otherend);

@@ -1076,6 +1076,20 @@ static int connect_ring(struct backend_info *be)
		 blkif->nr_rings, blkif->blk_protocol, protocol,
		 pers_grants ? "persistent grants" : "");

	ring_page_order = xenbus_read_unsigned(dev->otherend,
					       "ring-page-order", 0);

	if (ring_page_order > xen_blkif_max_ring_order) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err,
				 "requested ring page order %d exceed max:%d",
				 ring_page_order,
				 xen_blkif_max_ring_order);
		return err;
	}

	blkif->nr_ring_pages = 1 << ring_page_order;

	if (blkif->nr_rings == 1)
		return read_per_ring_refs(&blkif->rings[0], dev->otherend);
	else {