Commit f5e27a20 authored by Doug Ledford
Browse files

Merge branch 'k.o/for-rc' into k.o/wip/dl-for-next



Several items of conflict have arisen between the RDMA stack's for-rc
branch and upcoming for-next work:

9fd4350b ("IB/rxe: avoid double kfree_skb") directly conflicts with
2e473507 ("IB/rxe: optimize the function duplicate_request")

Patches already submitted by Intel for the hfi1 driver will fail to
apply cleanly without this merge

Other people on the mailing list have reported that their upcoming
patches also fail to apply cleanly without this merge

Signed-off-by: Doug Ledford <dledford@redhat.com>
parents 064e5262 9aa16921
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
@@ -61,9 +61,12 @@ config INFINIBAND_ON_DEMAND_PAGING
	  pages on demand instead.

config INFINIBAND_ADDR_TRANS
	bool
	bool "RDMA/CM"
	depends on INFINIBAND
	default y
	---help---
	  Support for RDMA communication manager (CM).
	  This allows for a generic connection abstraction over RDMA.

config INFINIBAND_ADDR_TRANS_CONFIGFS
	bool
+35 −20
Original line number Diff line number Diff line
@@ -291,16 +291,20 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
		 * so lookup free slot only if requested.
		 */
		if (pempty && empty < 0) {
			if (data->props & GID_TABLE_ENTRY_INVALID) {
				/* Found an invalid (free) entry; allocate it */
				if (data->props & GID_TABLE_ENTRY_DEFAULT) {
					if (default_gid)
						empty = curr_index;
				} else {
			if (data->props & GID_TABLE_ENTRY_INVALID &&
			    (default_gid ==
			     !!(data->props & GID_TABLE_ENTRY_DEFAULT))) {
				/*
				 * Found an invalid (free) entry; allocate it.
				 * If default GID is requested, then our
				 * found slot must be one of the DEFAULT
				 * reserved slots or we fail.
				 * This ensures that only DEFAULT reserved
				 * slots are used for default property GIDs.
				 */
				empty = curr_index;
			}
		}
		}

		/*
		 * Additionally find_gid() is used to find valid entry during
@@ -420,8 +424,10 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
	return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
static int
_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		  union ib_gid *gid, struct ib_gid_attr *attr,
		  unsigned long mask, bool default_gid)
{
	struct ib_gid_table *table;
	int ret = 0;
@@ -431,11 +437,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,

	mutex_lock(&table->lock);

	ix = find_gid(table, gid, attr, false,
		      GID_ATTR_FIND_MASK_GID	  |
		      GID_ATTR_FIND_MASK_GID_TYPE |
		      GID_ATTR_FIND_MASK_NETDEV,
		      NULL);
	ix = find_gid(table, gid, attr, default_gid, mask, NULL);
	if (ix < 0) {
		ret = -EINVAL;
		goto out_unlock;
@@ -452,6 +454,17 @@ out_unlock:
	return ret;
}

/*
 * ib_cache_gid_del - remove a non-default GID entry from @port's GID table.
 *
 * Thin wrapper around _ib_cache_gid_del(): matches the entry on GID value,
 * GID type, netdev and the default-GID property.  Passing default_gid=false
 * together with GID_ATTR_FIND_MASK_DEFAULT means only non-default entries
 * can be matched here, so the reserved default-GID slots are never deleted
 * through this path.
 */
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
{
	return _ib_cache_gid_del(ib_dev, port, gid, attr,
				 GID_ATTR_FIND_MASK_GID      |
				 GID_ATTR_FIND_MASK_GID_TYPE |
				 GID_ATTR_FIND_MASK_DEFAULT  |
				 GID_ATTR_FIND_MASK_NETDEV,
				 false);
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
{
@@ -728,7 +741,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
{
	union ib_gid gid;
	union ib_gid gid = { };
	struct ib_gid_attr gid_attr;
	struct ib_gid_table *table;
	unsigned int gid_type;
@@ -736,7 +749,9 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,

	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;

	make_default_gid(ndev, &gid);
	mask = GID_ATTR_FIND_MASK_GID_TYPE |
	       GID_ATTR_FIND_MASK_DEFAULT |
	       GID_ATTR_FIND_MASK_NETDEV;
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

@@ -747,12 +762,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
		gid_attr.gid_type = gid_type;

		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
			mask = GID_ATTR_FIND_MASK_GID_TYPE |
			       GID_ATTR_FIND_MASK_DEFAULT;
			make_default_gid(ndev, &gid);
			__ib_cache_gid_add(ib_dev, port, &gid,
					   &gid_attr, mask, true);
		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
			ib_cache_gid_del(ib_dev, port, &gid, &gid_attr);
			_ib_cache_gid_del(ib_dev, port, &gid,
					  &gid_attr, mask, true);
		}
	}
}
+43 −17
Original line number Diff line number Diff line
@@ -381,6 +381,8 @@ struct cma_hdr {
#define CMA_VERSION 0x00

struct cma_req_info {
	struct sockaddr_storage listen_addr_storage;
	struct sockaddr_storage src_addr_storage;
	struct ib_device *device;
	int port;
	union ib_gid local_gid;
@@ -865,7 +867,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	union ib_gid sgid;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
@@ -888,12 +889,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
	if (ret)
		goto out;

	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
			   rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index,
			   &sgid, NULL);
	if (ret)
		goto out;

	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

	if (conn_param)
@@ -1339,11 +1334,11 @@ static bool validate_net_dev(struct net_device *net_dev,
}

static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
					  const struct cma_req_info *req)
					  struct cma_req_info *req)
{
	struct sockaddr_storage listen_addr_storage, src_addr_storage;
	struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
			*src_addr = (struct sockaddr *)&src_addr_storage;
	struct sockaddr *listen_addr =
			(struct sockaddr *)&req->listen_addr_storage;
	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
	struct net_device *net_dev;
	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
	int err;
@@ -1358,11 +1353,6 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
	if (!net_dev)
		return ERR_PTR(-ENODEV);

	if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
		dev_put(net_dev);
		return ERR_PTR(-EHOSTUNREACH);
	}

	return net_dev;
}

@@ -1489,15 +1479,51 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
		}
	}

	/*
	 * Net namespace might be getting deleted while route lookup,
	 * cm_id lookup is in progress. Therefore, perform netdevice
	 * validation, cm_id lookup under rcu lock.
	 * RCU lock along with netdevice state check, synchronizes with
	 * netdevice migrating to different net namespace and also avoids
	 * case where net namespace doesn't get deleted while lookup is in
	 * progress.
	 * If the device state is not IFF_UP, its properties such as ifindex
	 * and nd_net cannot be trusted to remain valid without rcu lock.
	 * net/core/dev.c change_net_namespace() ensures to synchronize with
	 * ongoing operations on net device after device is closed using
	 * synchronize_net().
	 */
	rcu_read_lock();
	if (*net_dev) {
		/*
		 * If netdevice is down, it is likely that it is administratively
		 * down or it might be migrating to different namespace.
		 * In that case avoid further processing, as the net namespace
		 * or ifindex may change.
		 */
		if (((*net_dev)->flags & IFF_UP) == 0) {
			id_priv = ERR_PTR(-EHOSTUNREACH);
			goto err;
		}

		if (!validate_net_dev(*net_dev,
				 (struct sockaddr *)&req.listen_addr_storage,
				 (struct sockaddr *)&req.src_addr_storage)) {
			id_priv = ERR_PTR(-EHOSTUNREACH);
			goto err;
		}
	}

	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
				rdma_ps_from_service_id(req.service_id),
				cma_port_from_service_id(req.service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
err:
	rcu_read_unlock();
	if (IS_ERR(id_priv) && *net_dev) {
		dev_put(*net_dev);
		*net_dev = NULL;
	}

	return id_priv;
}

+4 −1
Original line number Diff line number Diff line
@@ -114,7 +114,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
			struct sockaddr_storage *mapped_sockaddr,
			u8 nl_client)
{
	struct hlist_head *hash_bucket_head;
	struct hlist_head *hash_bucket_head = NULL;
	struct iwpm_mapping_info *map_info;
	unsigned long flags;
	int ret = -EINVAL;
@@ -142,6 +142,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
		}
	}
	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);

	if (!hash_bucket_head)
		kfree(map_info);
	return ret;
}

+2 −2
Original line number Diff line number Diff line
@@ -59,7 +59,7 @@ module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;
static atomic_t ib_mad_client_id = ATOMIC_INIT(0);

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
@@ -377,7 +377,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
	}

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
	mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);

	/*
	 * Make sure MAD registration (if supplied)
Loading