Commit a032e4f6 authored by Sagi Grimberg, committed by Christoph Hellwig

nvmet-rdma: fix bonding failover possible NULL deref



The RDMA_CM_EVENT_ADDR_CHANGE event occurs during bonding failover on
normal as well as on listening cm_ids. On a listening cm_id there is no
queue, so the event immediately triggers a NULL dereference when the
handler tries to disconnect a queue for a cm_id that actually belongs
to the port.
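For context, the pre-patch handler path looked roughly like this (a
condensed sketch, not the verbatim function; unrelated cases omitted):

  /*
   * Condensed pre-patch flow: a listening cm_id has no QP, so queue
   * stays NULL, and ADDR_CHANGE funnels that NULL queue straight into
   * nvmet_rdma_queue_disconnect(), which dereferences it.
   */
  static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
  		struct rdma_cm_event *event)
  {
  	struct nvmet_rdma_queue *queue = NULL;

  	if (cm_id->qp)				/* false for a listener */
  		queue = cm_id->qp->qp_context;

  	switch (event->event) {
  	case RDMA_CM_EVENT_ADDR_CHANGE:		/* raised on listeners too */
  	case RDMA_CM_EVENT_DISCONNECTED:
  	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
  		nvmet_rdma_queue_disconnect(queue);	/* NULL deref here */
  		break;
  	default:				/* other events elided */
  		break;
  	}
  	return 0;
  }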

To fix this, provide a different handling path for listener cm_ids:
defer a work that disables and (re)enables the port, which essentially
destroys the current listener cm_id and sets up a new one. The teardown
has to run from a work item because a cm_id cannot be destroyed from
within its own event handler.
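In sketch form, the deferred repair reduces to one work item (a
condensed, commented copy of the function from the diff below):

  static void nvmet_rdma_repair_port_work(struct work_struct *w)
  {
  	struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
  			struct nvmet_rdma_port, repair_work);
  	int ret;

  	/* tear down the stale listener: xchg(&port->cm_id) + rdma_destroy_id */
  	nvmet_rdma_disable_port(port);
  	/* create, bind and listen on a fresh cm_id */
  	ret = nvmet_rdma_enable_port(port);
  	if (ret)	/* retry until the port comes back up */
  		schedule_delayed_work(&port->repair_work, 5 * HZ);
  }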

Reported-by: Alex Lyakas <alex@zadara.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Tested-by: Alex Lyakas <alex@zadara.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent f0e656e4
drivers/nvme/target/rdma.c: +119 −56
@@ -105,6 +105,13 @@ struct nvmet_rdma_queue {
 	struct list_head	queue_list;
 };
 
+struct nvmet_rdma_port {
+	struct nvmet_port	*nport;
+	struct sockaddr_storage addr;
+	struct rdma_cm_id	*cm_id;
+	struct delayed_work	repair_work;
+};
+
 struct nvmet_rdma_device {
 	struct ib_device	*device;
 	struct ib_pd		*pd;
@@ -917,7 +924,8 @@ static void nvmet_rdma_free_dev(struct kref *ref)
 static struct nvmet_rdma_device *
 nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
 {
-	struct nvmet_port *port = cm_id->context;
+	struct nvmet_rdma_port *port = cm_id->context;
+	struct nvmet_port *nport = port->nport;
 	struct nvmet_rdma_device *ndev;
 	int inline_page_count;
 	int inline_sge_count;
@@ -934,17 +942,17 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
 	if (!ndev)
 		goto out_err;
 
-	inline_page_count = num_pages(port->inline_data_size);
+	inline_page_count = num_pages(nport->inline_data_size);
 	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
 				cm_id->device->attrs.max_recv_sge) - 1;
 	if (inline_page_count > inline_sge_count) {
 		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
-			port->inline_data_size, cm_id->device->name,
+			nport->inline_data_size, cm_id->device->name,
 			inline_sge_count * PAGE_SIZE);
-		port->inline_data_size = inline_sge_count * PAGE_SIZE;
+		nport->inline_data_size = inline_sge_count * PAGE_SIZE;
 		inline_page_count = inline_sge_count;
 	}
-	ndev->inline_data_size = port->inline_data_size;
+	ndev->inline_data_size = nport->inline_data_size;
 	ndev->inline_page_count = inline_page_count;
 	ndev->device = cm_id->device;
 	kref_init(&ndev->ref);
@@ -1272,6 +1280,7 @@ static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
 static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 		struct rdma_cm_event *event)
 {
+	struct nvmet_rdma_port *port = cm_id->context;
 	struct nvmet_rdma_device *ndev;
 	struct nvmet_rdma_queue *queue;
 	int ret = -EINVAL;
@@ -1287,7 +1296,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 		ret = -ENOMEM;
 		goto put_device;
 	}
-	queue->port = cm_id->context;
+	queue->port = port->nport;
 
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
@@ -1412,7 +1421,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
 		struct nvmet_rdma_queue *queue)
 {
-	struct nvmet_port *port;
+	struct nvmet_rdma_port *port;
 
 	if (queue) {
 		/*
@@ -1431,7 +1440,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
 	 * cm_id destroy. use atomic xchg to make sure
 	 * we don't compete with remove_port.
 	 */
-	if (xchg(&port->priv, NULL) != cm_id)
+	if (xchg(&port->cm_id, NULL) != cm_id)
 		return 0;
 
 	/*
@@ -1462,6 +1471,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		nvmet_rdma_queue_established(queue);
 		break;
 	case RDMA_CM_EVENT_ADDR_CHANGE:
+		if (!queue) {
+			struct nvmet_rdma_port *port = cm_id->context;
+
+			schedule_delayed_work(&port->repair_work, 0);
+			break;
+		}
+		/* FALLTHROUGH */
 	case RDMA_CM_EVENT_DISCONNECTED:
 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
 		nvmet_rdma_queue_disconnect(queue);
@@ -1504,42 +1520,19 @@ restart:
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 }
 
-static int nvmet_rdma_add_port(struct nvmet_port *port)
+static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
 {
-	struct rdma_cm_id *cm_id;
-	struct sockaddr_storage addr = { };
-	__kernel_sa_family_t af;
-	int ret;
-
-	switch (port->disc_addr.adrfam) {
-	case NVMF_ADDR_FAMILY_IP4:
-		af = AF_INET;
-		break;
-	case NVMF_ADDR_FAMILY_IP6:
-		af = AF_INET6;
-		break;
-	default:
-		pr_err("address family %d not supported\n",
-				port->disc_addr.adrfam);
-		return -EINVAL;
-	}
+	struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
 
-	if (port->inline_data_size < 0) {
-		port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
-	} else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
-		pr_warn("inline_data_size %u is too large, reducing to %u\n",
-			port->inline_data_size,
-			NVMET_RDMA_MAX_INLINE_DATA_SIZE);
-		port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
-	}
+	if (cm_id)
+		rdma_destroy_id(cm_id);
+}
 
-	ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
-			port->disc_addr.trsvcid, &addr);
-	if (ret) {
-		pr_err("malformed ip/port passed: %s:%s\n",
-			port->disc_addr.traddr, port->disc_addr.trsvcid);
-		return ret;
-	}
+static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
+{
+	struct sockaddr *addr = (struct sockaddr *)&port->addr;
+	struct rdma_cm_id *cm_id;
+	int ret;
 
 	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
 			RDMA_PS_TCP, IB_QPT_RC);
@@ -1558,23 +1551,19 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
 		goto out_destroy_id;
 	}
 
-	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
+	ret = rdma_bind_addr(cm_id, addr);
 	if (ret) {
-		pr_err("binding CM ID to %pISpcs failed (%d)\n",
-			(struct sockaddr *)&addr, ret);
+		pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
 		goto out_destroy_id;
 	}
 
 	ret = rdma_listen(cm_id, 128);
 	if (ret) {
-		pr_err("listening to %pISpcs failed (%d)\n",
-			(struct sockaddr *)&addr, ret);
+		pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
 		goto out_destroy_id;
 	}
 
-	pr_info("enabling port %d (%pISpcs)\n",
-		le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
-	port->priv = cm_id;
+	port->cm_id = cm_id;
 	return 0;
 
 out_destroy_id:
@@ -1582,18 +1571,92 @@ out_destroy_id:
 	return ret;
 }
 
-static void nvmet_rdma_remove_port(struct nvmet_port *port)
+static void nvmet_rdma_repair_port_work(struct work_struct *w)
 {
-	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
+	struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
+			struct nvmet_rdma_port, repair_work);
+	int ret;
 
-	if (cm_id)
-		rdma_destroy_id(cm_id);
+	nvmet_rdma_disable_port(port);
+	ret = nvmet_rdma_enable_port(port);
+	if (ret)
+		schedule_delayed_work(&port->repair_work, 5 * HZ);
+}
+
+static int nvmet_rdma_add_port(struct nvmet_port *nport)
+{
+	struct nvmet_rdma_port *port;
+	__kernel_sa_family_t af;
+	int ret;
+
+	port = kzalloc(sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	nport->priv = port;
+	port->nport = nport;
+	INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);
+
+	switch (nport->disc_addr.adrfam) {
+	case NVMF_ADDR_FAMILY_IP4:
+		af = AF_INET;
+		break;
+	case NVMF_ADDR_FAMILY_IP6:
+		af = AF_INET6;
+		break;
+	default:
+		pr_err("address family %d not supported\n",
+			nport->disc_addr.adrfam);
+		ret = -EINVAL;
+		goto out_free_port;
+	}
+
+	if (nport->inline_data_size < 0) {
+		nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
+	} else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
+		pr_warn("inline_data_size %u is too large, reducing to %u\n",
+			nport->inline_data_size,
+			NVMET_RDMA_MAX_INLINE_DATA_SIZE);
+		nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+	}
+
+	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
+			nport->disc_addr.trsvcid, &port->addr);
+	if (ret) {
+		pr_err("malformed ip/port passed: %s:%s\n",
+			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
+		goto out_free_port;
+	}
+
+	ret = nvmet_rdma_enable_port(port);
+	if (ret)
+		goto out_free_port;
+
+	pr_info("enabling port %d (%pISpcs)\n",
+		le16_to_cpu(nport->disc_addr.portid),
+		(struct sockaddr *)&port->addr);
+
+	return 0;
+
+out_free_port:
+	kfree(port);
+	return ret;
+}
+
+static void nvmet_rdma_remove_port(struct nvmet_port *nport)
+{
+	struct nvmet_rdma_port *port = nport->priv;
+
+	cancel_delayed_work_sync(&port->repair_work);
+	nvmet_rdma_disable_port(port);
+	kfree(port);
 }
 
 static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
-		struct nvmet_port *port, char *traddr)
+		struct nvmet_port *nport, char *traddr)
 {
-	struct rdma_cm_id *cm_id = port->priv;
+	struct nvmet_rdma_port *port = nport->priv;
+	struct rdma_cm_id *cm_id = port->cm_id;
 
 	if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
 		struct nvmet_rdma_rsp *rsp =
@@ -1603,7 +1666,7 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
 
 		sprintf(traddr, "%pISc", addr);
 	} else {
-		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
+		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
 	}
 }