Commit 175e8efe authored by Doug Ledford's avatar Doug Ledford
Browse files

Merge branches 'bart-srp', 'generic-errors', 'ira-cleanups' and 'mwang-v8' into k.o/for-4.2

Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -156,7 +156,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
		goto error1;
	}

	if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
	if (rdma_cap_ib_smi(device, port_num)) {
		/* Obtain send only MAD agent for SMI QP */
		port_priv->agent[0] = ib_register_mad_agent(device, port_num,
							    IB_QPT_SMI, NULL, 0,
+25 −36
Original line number Diff line number Diff line
@@ -58,17 +58,6 @@ struct ib_update_work {
	u8                 port_num;
};

/* First valid port index for @device: IB switches use port 0, HCAs port 1. */
static inline int start_port(struct ib_device *device)
{
	if (device->node_type == RDMA_NODE_IB_SWITCH)
		return 0;
	return 1;
}

/* Last valid port index for @device: 0 for an IB switch, else phys_port_cnt. */
static inline int end_port(struct ib_device *device)
{
	if (device->node_type == RDMA_NODE_IB_SWITCH)
		return 0;
	return device->phys_port_cnt;
}

int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
@@ -78,12 +67,12 @@ int ib_get_cached_gid(struct ib_device *device,
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.gid_cache[port_num - start_port(device)];
	cache = device->cache.gid_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
@@ -112,11 +101,11 @@ int ib_find_cached_gid(struct ib_device *device,

	read_lock_irqsave(&device->cache.lock, flags);

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		cache = device->cache.gid_cache[p];
		for (i = 0; i < cache->table_len; ++i) {
			if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
				*port_num = p + start_port(device);
				*port_num = p + rdma_start_port(device);
				if (index)
					*index = i;
				ret = 0;
@@ -140,12 +129,12 @@ int ib_get_cached_pkey(struct ib_device *device,
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];
	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
@@ -169,12 +158,12 @@ int ib_find_cached_pkey(struct ib_device *device,
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < start_port(device) || port_num > end_port(device))
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];
	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

@@ -209,12 +198,12 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
	int i;
	int ret = -ENOENT;

	if (port_num < start_port(device) || port_num > end_port(device))
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];
	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

	*index = -1;

@@ -238,11 +227,11 @@ int ib_get_cached_lmc(struct ib_device *device,
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
@@ -303,13 +292,13 @@ static void ib_cache_update(struct ib_device *device,

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];
	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - rdma_start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;
	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;
	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

@@ -363,14 +352,14 @@ static void ib_cache_setup_one(struct ib_device *device)

	device->cache.pkey_cache =
		kmalloc(sizeof *device->cache.pkey_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
	device->cache.gid_cache =
		kmalloc(sizeof *device->cache.gid_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);

	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (end_port(device) -
					   start_port(device) + 1),
					  (rdma_end_port(device) -
					   rdma_start_port(device) + 1),
					  GFP_KERNEL);

	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
@@ -380,10 +369,10 @@ static void ib_cache_setup_one(struct ib_device *device)
		goto err;
	}

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		device->cache.pkey_cache[p] = NULL;
		device->cache.gid_cache [p] = NULL;
		ib_cache_update(device, p + start_port(device));
		ib_cache_update(device, p + rdma_start_port(device));
	}

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
@@ -394,7 +383,7 @@ static void ib_cache_setup_one(struct ib_device *device)
	return;

err_cache:
	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}
@@ -412,7 +401,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
	ib_unregister_event_handler(&device->cache.event_handler);
	flush_workqueue(ib_wq);

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}
+17 −3
Original line number Diff line number Diff line
@@ -3759,11 +3759,9 @@ static void cm_add_one(struct ib_device *ib_device)
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u8 i;

	if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
		return;

	cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
			 ib_device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
@@ -3782,6 +3780,9 @@ static void cm_add_one(struct ib_device *ib_device)

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port)
			goto error1;
@@ -3808,7 +3809,13 @@ static void cm_add_one(struct ib_device *ib_device)
		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
@@ -3824,11 +3831,15 @@ error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
	}
free:
	device_unregister(cm_dev->device);
	kfree(cm_dev);
}
@@ -3852,6 +3863,9 @@ static void cm_remove_one(struct ib_device *ib_device)
	write_unlock_irqrestore(&cm.device_lock, flags);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
+128 −157
Original line number Diff line number Diff line
@@ -65,6 +65,34 @@ MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18

/*
 * Human-readable names for RDMA CM events, indexed by
 * enum rdma_cm_event_type; consumed by rdma_event_msg().
 */
static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved",
	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
};

/* Map an RDMA CM event code to its printable name; out-of-range or
 * unnamed codes yield a fixed fallback string. */
const char *rdma_event_msg(enum rdma_cm_event_type event)
{
	size_t index = event;

	if (index >= ARRAY_SIZE(cma_events) || !cma_events[index])
		return "unrecognized event";

	return cma_events[index];
}
EXPORT_SYMBOL(rdma_event_msg);

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

@@ -349,18 +377,35 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_a
	return ret;
}

/*
 * Check that @port on @device is usable for an address of link type
 * @dev_type and that @gid is cached on exactly that port.
 *
 * Returns 0 when the port matches, -ENODEV on a link-layer or port
 * mismatch, or the error returned by the GID-cache lookup.
 */
static inline int cma_validate_port(struct ib_device *device, u8 port,
				      union ib_gid *gid, int dev_type)
{
	u8 found_port;
	int ret;

	/* IPoIB addresses require an IB port, and non-IB addresses must
	 * not land on one. */
	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		return -ENODEV;

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		return -ENODEV;

	ret = ib_find_cached_gid(device, gid, &found_port, NULL);
	if (ret)
		/* found_port is only written on success; checking it here
		 * would read an indeterminate value and could replace the
		 * real error with -ENODEV. */
		return ret;

	if (port != found_port)
		return -ENODEV;

	return 0;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv,
			   struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid;
	union ib_gid gid, iboe_gid, *gidp;
	int ret = -ENODEV;
	u8 port, found_port;
	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
	u8 port;

	if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

@@ -370,44 +415,39 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);
	if (listen_id_priv &&
	    rdma_port_get_link_layer(listen_id_priv->id.device,
				     listen_id_priv->id.port_num) == dev_ll) {

	if (listen_id_priv) {
		cma_dev = listen_id_priv->cma_dev;
		port = listen_id_priv->id.port_num;
		if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
		    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
			ret = ib_find_cached_gid(cma_dev->device, &iboe_gid,
						 &found_port, NULL);
		else
			ret = ib_find_cached_gid(cma_dev->device, &gid,
						 &found_port, NULL);
		gidp = rdma_protocol_roce(cma_dev->device, port) ?
		       &iboe_gid : &gid;

		if (!ret && (port  == found_port)) {
			id_priv->id.port_num = found_port;
		ret = cma_validate_port(cma_dev->device, port, gidp,
					dev_addr->dev_type);
		if (!ret) {
			id_priv->id.port_num = port;
			goto out;
		}
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (listen_id_priv &&
			    listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;
			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
					ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
				else
					ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);

				if (!ret && (port == found_port)) {
					id_priv->id.port_num = found_port;
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;

			ret = cma_validate_port(cma_dev->device, port, gidp,
						dev_addr->dev_type);
			if (!ret) {
				id_priv->id.port_num = port;
				goto out;
			}
		}
	}
	}

out:
	if (!ret)
@@ -435,10 +475,10 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
	pkey = ntohs(addr->sib_pkey);

	list_for_each_entry(cur_dev, &dev_list, list) {
		if (rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

@@ -633,10 +673,9 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
	if (ret)
		goto out;

	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
	    == RDMA_TRANSPORT_IB &&
	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
	    == IB_LINK_LAYER_ETHERNET) {
	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);

		if (ret)
@@ -700,11 +739,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
	int ret;
	u16 pkey;

	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
	    IB_LINK_LAYER_INFINIBAND)
		pkey = ib_addr_get_pkey(dev_addr);
	else
	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
		pkey = 0xffff;
	else
		pkey = ib_addr_get_pkey(dev_addr);

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
@@ -735,8 +773,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
@@ -745,19 +782,15 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
	} else
		ret = -ENOSYS;
		break;
	}

	return ret;
}
@@ -935,13 +968,9 @@ static inline int cma_user_data_offset(struct rdma_id_private *id_priv)

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

@@ -1013,17 +1042,12 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
		if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
				      id_priv->id.port_num)) {
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
		} else
			kref_put(&mc->mcref, release_mc);
			break;
		default:
			break;
		}
	}
}

@@ -1044,17 +1068,12 @@ void rdma_destroy_id(struct rdma_cm_id *id)
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
@@ -1632,8 +1651,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
	struct rdma_cm_id *id;
	int ret;

	if (cma_family(id_priv) == AF_IB &&
	    rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB)
	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
		return;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
@@ -1974,26 +1992,15 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
	if (rdma_cap_ib_sa(id->device, id->port_num))
		ret = cma_resolve_ib_route(id_priv, timeout_ms);
			break;
		case IB_LINK_LAYER_ETHERNET:
	else if (rdma_protocol_roce(id->device, id->port_num))
		ret = cma_resolve_iboe_route(id_priv);
			break;
		default:
			ret = -ENOSYS;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
	else if (rdma_protocol_iwarp(id->device, id->port_num))
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
	else
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto err;

@@ -2035,7 +2042,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv)
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
		    !rdma_cap_ib_cm(cur_dev->device, 1))
			continue;

		if (!cma_dev)
@@ -2067,7 +2074,7 @@ port_found:
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
		(rdma_protocol_ib(cma_dev->device, p)) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2544,18 +2551,15 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
		if (rdma_cap_ib_cm(id->device, 1)) {
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
		} else if (rdma_cap_iw_cm(id->device, 1)) {
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
		} else {
			ret = -ENOSYS;
			goto err;
		}
@@ -2891,20 +2895,15 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
	else
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

@@ -3007,8 +3006,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
@@ -3024,14 +3022,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
			else
				ret = cma_rep_recv(id_priv);
		}
		break;
	case RDMA_TRANSPORT_IWARP:
	} else if (rdma_cap_iw_cm(id->device, id->port_num))
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
	else
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;
@@ -3075,8 +3069,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
@@ -3084,15 +3077,12 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
	default:
	} else
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_reject);
@@ -3106,22 +3096,18 @@ int rdma_disconnect(struct rdma_cm_id *id)
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		ret = cma_modify_qp_err(id_priv);
		if (ret)
			goto out;
		/* Initiate or respond to a disconnect. */
		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
			ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
		break;
	case RDMA_TRANSPORT_IWARP:
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
		break;
	default:
	} else
		ret = -EINVAL;
		break;
	}

out:
	return ret;
}
@@ -3367,24 +3353,13 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
	list_add(&mc->list, &id_priv->mc_list);
	spin_unlock(&id_priv->lock);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_join_ib_multicast(id_priv, mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
	if (rdma_protocol_roce(id->device, id->port_num)) {
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
		ret = cma_join_ib_multicast(id_priv, mc);
	else
		ret = -ENOSYS;
		break;
	}

	if (ret) {
		spin_lock_irq(&id_priv->lock);
@@ -3412,19 +3387,15 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
				ib_detach_mcast(id->qp,
						&mc->multicast.ib->rec.mgid,
						be16_to_cpu(mc->multicast.ib->rec.mlid));
			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
				case IB_LINK_LAYER_INFINIBAND:

			BUG_ON(id_priv->cma_dev->device != id->device);

			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
				ib_sa_free_multicast(mc->multicast.ib);
				kfree(mc);
					break;
				case IB_LINK_LAYER_ETHERNET:
			} else if (rdma_protocol_roce(id->device, id->port_num))
				kref_put(&mc->mcref, release_mc);
					break;
				default:
					break;
				}
			}

			return;
		}
	}
+31 −50

File changed.

Preview size limit exceeded, changes collapsed.

Loading