Commit 48fea861 authored by David S. Miller
Browse files

Merge branch 'mlx5-next'



Eli Cohen says:

====================
mlx5 update for 3.18

This series integrates a new mechanism for populating and extracting field values
used in the driver/firmware interaction around command mailboxes.

Changes from V1:
 - Remove unused definition of memcpy_cpu_to_be32()
 - Remove definitions of non_existent_*() and use BUILD_BUG_ON() instead.
 - Added a one-line patch to add support for ConnectX-4 devices.

Changes from V0:
 - trimmed the auto-generated file to a minimum, as required by the reviewers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 55a93b3e f832dc82
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
		return ERR_PTR(-EINVAL);

	entries = roundup_pow_of_two(entries + 1);
	if (entries > dev->mdev->caps.max_cqes)
	if (entries > dev->mdev->caps.gen.max_cqes)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
	int err;
	u32 fsel;

	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
		return -ENOSYS;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
	int uninitialized_var(cqe_size);
	unsigned long flags;

	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
		return -EINVAL;

	entries = roundup_pow_of_two(entries + 1);
	if (entries > dev->mdev->caps.max_cqes + 1)
	if (entries > dev->mdev->caps.gen.max_cqes + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
+1 −1
Original line number Diff line number Diff line
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)

	packet_error = be16_to_cpu(out_mad->status);

	dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
	dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
+47 −36
Original line number Diff line number Diff line
@@ -157,11 +157,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	struct mlx5_general_caps *gen;
	int err = -ENOMEM;
	int max_rq_sg;
	int max_sq_sg;
	u64 flags;

	gen = &dev->mdev->caps.gen;
	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
@@ -183,7 +185,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN;
	flags = dev->mdev->caps.flags;
	flags = gen->flags;
	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@@ -213,30 +215,31 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
	memcpy(&props->sys_image_guid, out_mad->data +	4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->mdev->caps.min_page_sz;
	props->max_qp		   = 1 << dev->mdev->caps.log_max_qp;
	props->max_qp_wr	   = dev->mdev->caps.max_wqes;
	max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
	max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
	props->page_size_cap	   = gen->min_page_sz;
	props->max_qp		   = 1 << gen->log_max_qp;
	props->max_qp_wr	   = gen->max_wqes;
	max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
	max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_cq		   = 1 << dev->mdev->caps.log_max_cq;
	props->max_cqe		   = dev->mdev->caps.max_cqes - 1;
	props->max_mr		   = 1 << dev->mdev->caps.log_max_mkey;
	props->max_pd		   = 1 << dev->mdev->caps.log_max_pd;
	props->max_qp_rd_atom	   = dev->mdev->caps.max_ra_req_qp;
	props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp;
	props->max_cq		   = 1 << gen->log_max_cq;
	props->max_cqe		   = gen->max_cqes - 1;
	props->max_mr		   = 1 << gen->log_max_mkey;
	props->max_pd		   = 1 << gen->log_max_pd;
	props->max_qp_rd_atom	   = 1 << gen->log_max_ra_req_qp;
	props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp;
	props->max_srq		   = 1 << gen->log_max_srq;
	props->max_srq_wr	   = gen->max_srq_wqes - 1;
	props->local_ca_ack_delay  = gen->local_ca_ack_delay;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = 1 << dev->mdev->caps.log_max_srq;
	props->max_srq_wr	   = dev->mdev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = max_rq_sg - 1;
	props->max_fast_reg_page_list_len = (unsigned int)-1;
	props->local_ca_ack_delay  = dev->mdev->caps.local_ca_ack_delay;
	props->local_ca_ack_delay  = gen->local_ca_ack_delay;
	props->atomic_cap	   = IB_ATOMIC_NONE;
	props->masked_atomic_cap   = IB_ATOMIC_NONE;
	props->max_pkeys	   = be16_to_cpup((__be16 *)(out_mad->data + 28));
	props->max_mcast_grp	   = 1 << dev->mdev->caps.log_max_mcg;
	props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg;
	props->max_mcast_grp	   = 1 << gen->log_max_mcg;
	props->max_mcast_qp_attach = gen->max_qp_mcg;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
@@ -254,10 +257,12 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	struct mlx5_general_caps *gen;
	int ext_active_speed;
	int err = -ENOMEM;

	if (port < 1 || port > dev->mdev->caps.num_ports) {
	gen = &dev->mdev->caps.gen;
	if (port < 1 || port > gen->num_ports) {
		mlx5_ib_warn(dev, "invalid port number %d\n", port);
		return -EINVAL;
	}
@@ -288,8 +293,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len	= out_mad->data[50];
	props->max_msg_sz	= 1 << to_mdev(ibdev)->mdev->caps.log_max_msg;
	props->pkey_tbl_len	= to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len;
	props->max_msg_sz	= 1 << gen->log_max_msg;
	props->pkey_tbl_len	= gen->port[port - 1].pkey_table_len;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
@@ -316,7 +321,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == 4) {
		if (dev->mdev->caps.ext_port_cap[port - 1] &
		if (gen->ext_port_cap[port - 1] &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -470,6 +475,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
	struct mlx5_ib_alloc_ucontext_req_v2 req;
	struct mlx5_ib_alloc_ucontext_resp resp;
	struct mlx5_ib_ucontext *context;
	struct mlx5_general_caps *gen;
	struct mlx5_uuar_info *uuari;
	struct mlx5_uar *uars;
	int gross_uuars;
@@ -480,6 +486,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
	int i;
	size_t reqlen;

	gen = &dev->mdev->caps.gen;
	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

@@ -512,14 +519,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,

	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
	resp.qp_tab_size      = 1 << dev->mdev->caps.log_max_qp;
	resp.bf_reg_size      = dev->mdev->caps.bf_reg_size;
	resp.qp_tab_size      = 1 << gen->log_max_qp;
	resp.bf_reg_size      = gen->bf_reg_size;
	resp.cache_line_size  = L1_CACHE_BYTES;
	resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz;
	resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz;
	resp.max_send_wqebb = dev->mdev->caps.max_wqes;
	resp.max_recv_wr = dev->mdev->caps.max_wqes;
	resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes;
	resp.max_sq_desc_sz = gen->max_sq_desc_sz;
	resp.max_rq_desc_sz = gen->max_rq_desc_sz;
	resp.max_send_wqebb = gen->max_wqes;
	resp.max_recv_wr = gen->max_wqes;
	resp.max_srq_recv_wr = gen->max_srq_wqes;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
@@ -565,7 +572,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
	mutex_init(&context->db_page_mutex);

	resp.tot_uuars = req.total_num_uuars;
	resp.num_ports = dev->mdev->caps.num_ports;
	resp.num_ports = gen->num_ports;
	err = ib_copy_to_udata(udata, &resp,
			       sizeof(resp) - sizeof(resp.reserved));
	if (err)
@@ -967,9 +974,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,

static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	struct mlx5_general_caps *gen;
	int port;

	for (port = 1; port <= dev->mdev->caps.num_ports; port++)
	gen = &dev->mdev->caps.gen;
	for (port = 1; port <= gen->num_ports; port++)
		mlx5_query_ext_port_caps(dev, port);
}

@@ -977,9 +986,11 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	struct mlx5_general_caps *gen;
	int err = 0;
	int port;

	gen = &dev->mdev->caps.gen;
	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;
@@ -994,14 +1005,14 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
		goto out;
	}

	for (port = 1; port <= dev->mdev->caps.num_ports; port++) {
	for (port = 1; port <= gen->num_ports; port++) {
		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
		if (err) {
			mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
			break;
		}
		dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
		dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
		gen->port[port - 1].pkey_table_len = dprops->max_pkeys;
		gen->port[port - 1].gid_table_len = pprops->gid_tbl_len;
		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
			    dprops->max_pkeys, pprops->gid_tbl_len);
	}
@@ -1279,8 +1290,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner		= THIS_MODULE;
	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey	= mdev->caps.reserved_lkey;
	dev->num_ports		= mdev->caps.num_ports;
	dev->ib_dev.local_dma_lkey	= mdev->caps.gen.reserved_lkey;
	dev->num_ports		= mdev->caps.gen.num_ports;
	dev->ib_dev.phys_port_cnt     = dev->num_ports;
	dev->ib_dev.num_comp_vectors	= dev->num_comp_vectors;
	dev->ib_dev.dma_device	= &mdev->pdev->dev;
@@ -1355,7 +1366,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
	dev->ib_dev.free_fast_reg_page_list  = mlx5_ib_free_fast_reg_page_list;
	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;

	if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) {
	if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
+47 −25
Original line number Diff line number Diff line
@@ -158,11 +158,13 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_general_caps *gen;
	int wqe_size;
	int wq_size;

	gen = &dev->mdev->caps.gen;
	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr  > dev->mdev->caps.max_wqes)
	if (cap->max_recv_wr  > gen->max_wqes)
		return -EINVAL;

	if (!has_rq) {
@@ -182,10 +184,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > dev->mdev->caps.max_rq_desc_sz) {
			if (wqe_size > gen->max_rq_desc_sz) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    dev->mdev->caps.max_rq_desc_sz);
					    gen->max_rq_desc_sz);
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
@@ -266,9 +268,11 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	struct mlx5_general_caps *gen;
	int wqe_size;
	int wq_size;

	gen = &dev->mdev->caps.gen;
	if (!attr->cap.max_send_wr)
		return 0;

@@ -277,9 +281,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > dev->mdev->caps.max_sq_desc_sz) {
	if (wqe_size > gen->max_sq_desc_sz) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, dev->mdev->caps.max_sq_desc_sz);
			    wqe_size, gen->max_sq_desc_sz);
		return -EINVAL;
	}

@@ -292,9 +296,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
	if (qp->sq.wqe_cnt > gen->max_wqes) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
			    qp->sq.wqe_cnt, gen->max_wqes);
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -309,11 +313,13 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_ib_create_qp *ucmd)
{
	struct mlx5_general_caps *gen;
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > dev->mdev->caps.max_sq_desc_sz) {
	gen = &dev->mdev->caps.gen;
	if (desc_sz > gen->max_sq_desc_sz) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, dev->mdev->caps.max_sq_desc_sz);
			     desc_sz, gen->max_sq_desc_sz);
		return -EINVAL;
	}

@@ -325,9 +331,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
	if (qp->sq.wqe_cnt > gen->max_wqes) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
			     qp->sq.wqe_cnt, gen->max_wqes);
		return -EINVAL;
	}

@@ -803,16 +809,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
	struct mlx5_ib_resources *devr = &dev->devr;
	struct mlx5_ib_create_qp_resp resp;
	struct mlx5_create_qp_mbox_in *in;
	struct mlx5_general_caps *gen;
	struct mlx5_ib_create_qp ucmd;
	int inlen = sizeof(*in);
	int err;

	gen = &dev->mdev->caps.gen;
	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
		if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
			return -EINVAL;
		} else {
@@ -851,9 +859,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) {
			if (ucmd.sq_wqe_count > gen->max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, dev->mdev->caps.max_wqes);
					    ucmd.sq_wqe_count, gen->max_wqes);
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -1144,6 +1152,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_general_caps *gen;
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
@@ -1161,11 +1170,12 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}
	gen = &dev->mdev->caps.gen;

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
		if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
@@ -1272,6 +1282,9 @@ enum {

static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	struct mlx5_general_caps *gen;

	gen = &dev->mdev->caps.gen;
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
@@ -1279,7 +1292,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
	} else {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 dev->mdev->caps.stat_rate_support))
			 gen->stat_rate_support))
			--rate;
	}

@@ -1290,8 +1303,10 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	struct mlx5_general_caps *gen;
	int err;

	gen = &dev->mdev->caps.gen;
	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

@@ -1318,9 +1333,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
	path->port = port;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) {
		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
			pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len);
			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
			return -EINVAL;
		}

@@ -1492,6 +1507,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_general_caps *gen;
	struct mlx5_modify_qp_mbox_in *in;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
@@ -1500,6 +1516,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
	int mlx5_st;
	int err;

	gen = &dev->mdev->caps.gen;
	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;
@@ -1539,7 +1556,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg;
		context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
	}

	if (attr_mask & IB_QP_DEST_QPN)
@@ -1685,9 +1702,11 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mlx5_general_caps *gen;
	int err = -EINVAL;
	int port;

	gen = &dev->mdev->caps.gen;
	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -1699,21 +1718,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports))
	    (attr->port_num == 0 || attr->port_num > gen->num_ports))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len)
		if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp)
	    attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
		goto out;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp)
	    attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -2893,7 +2912,8 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
	ib_ah_attr->port_num	  = path->port;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
	if (ib_ah_attr->port_num == 0 ||
	    ib_ah_attr->port_num > dev->caps.gen.num_ports)
		return;

	ib_ah_attr->sl = path->sl & 0xf;
@@ -3011,10 +3031,12 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
					  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_general_caps *gen;
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC))
	gen = &dev->mdev->caps.gen;
	if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
+4 −2
Original line number Diff line number Diff line
@@ -238,6 +238,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_general_caps *gen;
	struct mlx5_ib_srq *srq;
	int desc_size;
	int buf_size;
@@ -247,11 +248,12 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
	int is_xrc;
	u32 flgs, xrcdn;

	gen = &dev->mdev->caps.gen;
	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= dev->mdev->caps.max_srq_wqes) {
	if (init_attr->attr.max_wr >= gen->max_srq_wqes) {
		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
			    init_attr->attr.max_wr,
			    dev->mdev->caps.max_srq_wqes);
			    gen->max_srq_wqes);
		return ERR_PTR(-EINVAL);
	}

Loading