Commit 79237743 authored by Jason Gunthorpe

Merge branch 'mlx5_uar' into rdma.git for-next

Meir Lichtinger says:

====================
ConnectX-7 supports setting the relaxed ordering read/write mkey attribute by
UMR, indicated by new HCA capabilities, so extend the mlx5_ib driver to
configure the UMR control segment accordingly.
====================

Based on the mlx5-next branch at
      git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
due to dependencies.

* branch 'mlx5_uar':
  RDMA/mlx5: Set mkey relaxed ordering by UMR with ConnectX-7
  RDMA/mlx5: Use MLX5_SET macro instead of local structure
  RDMA/mlx5: ConnectX-7 new capabilities to set relaxed ordering by UMR
parents 8b603d07 896ec973
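
For context, the user-visible entry point for this feature is the relaxed
ordering access flag that rdma-core already exposes; the driver changes below
decide whether such an mkey can still be created and updated through UMR. A
minimal userspace sketch (assuming an already-allocated PD; error handling
omitted):

	#include <infiniband/verbs.h>

	/* Request relaxed ordering for an MR; whether the mlx5 driver can
	 * service this mkey via UMR is decided internally by the checks
	 * added in this series. */
	static struct ibv_mr *reg_ro_mr(struct ibv_pd *pd, void *buf, size_t len)
	{
		return ibv_reg_mr(pd, buf, len,
				  IBV_ACCESS_LOCAL_WRITE |
				  IBV_ACCESS_RELAXED_ORDERING);
	}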
+7 −11
@@ -1356,15 +1356,6 @@ static inline void init_query_mad(struct ib_smp *mad)
 	mad->method	   = IB_MGMT_METHOD_GET;
 }
 
-static inline u8 convert_access(int acc)
-{
-	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
-	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
-	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
-	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
-	       MLX5_PERM_LOCAL_READ;
-}
-
 static inline int is_qp1(enum ib_qp_type qp_type)
 {
 	return qp_type == MLX5_IB_QPT_HW_GSI;
@@ -1463,8 +1454,13 @@ static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
 		return false;
 
 	if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
-	    (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) ||
-	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)))
+	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
 		return false;
 
+	if (access_flags & IB_ACCESS_RELAXED_ORDERING &&
+	     MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+	     !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+		return false;
+
 	return true;
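
The net effect of the split checks above: a relaxed-ordering MR is steered
away from UMR only when the HCA enables a relaxed ordering capability without
the matching *_umr capability needed to modify it. A condensed restatement of
that rule (an illustrative sketch, not code from this patch):

	/* UMR remains usable for a relaxed-ordering MR only if every
	 * enabled relaxed ordering capability is also UMR-modifiable. */
	static bool ro_umr_ok(struct mlx5_ib_dev *dev, int access_flags)
	{
		if (!(access_flags & IB_ACCESS_RELAXED_ORDERING))
			return true;
		return (!MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) ||
			MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) &&
		       (!MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) ||
			MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr));
	}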
+50 −18
@@ -263,7 +263,9 @@ static __be64 get_umr_update_translation_mask(void)
 	return cpu_to_be64(result);
 }
 
-static __be64 get_umr_update_access_mask(int atomic)
+static __be64 get_umr_update_access_mask(int atomic,
+					 int relaxed_ordering_write,
+					 int relaxed_ordering_read)
 {
 	u64 result;
 
@@ -275,6 +277,12 @@ static __be64 get_umr_update_access_mask(int atomic)
 	if (atomic)
 		result |= MLX5_MKEY_MASK_A;
 
+	if (relaxed_ordering_write)
+		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE;
+
+	if (relaxed_ordering_read)
+		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_READ;
+
 	return cpu_to_be64(result);
 }
 
@@ -289,17 +297,28 @@ static __be64 get_umr_update_pd_mask(void)
 
 static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
 {
-	if ((mask & MLX5_MKEY_MASK_PAGE_SIZE &&
-	     MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) ||
-	    (mask & MLX5_MKEY_MASK_A &&
-	     MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)))
+	if (mask & MLX5_MKEY_MASK_PAGE_SIZE &&
+	    MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
 		return -EPERM;
 
+	if (mask & MLX5_MKEY_MASK_A &&
+	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+		return -EPERM;
+
+	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+		return -EPERM;
+
+	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_READ &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+		return -EPERM;
+
 	return 0;
 }
 
 static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
 			       struct mlx5_wqe_umr_ctrl_seg *umr,
-			       const struct ib_send_wr *wr, int atomic)
+			       const struct ib_send_wr *wr)
 {
 	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
@@ -325,7 +344,10 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
 		umr->mkey_mask |= get_umr_update_translation_mask();
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
-		umr->mkey_mask |= get_umr_update_access_mask(atomic);
+		umr->mkey_mask |= get_umr_update_access_mask(
+			!!(MLX5_CAP_GEN(dev->mdev, atomic)),
+			!!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)),
+			!!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)));
 		umr->mkey_mask |= get_umr_update_pd_mask();
 	}
 	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
@@ -383,20 +405,31 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg,
 
 	memset(seg, 0, sizeof(*seg));
 	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
-		seg->status = MLX5_MKEY_STATUS_FREE;
+		MLX5_SET(mkc, seg, free, 1);
 
-	seg->flags = convert_access(umrwr->access_flags);
+	MLX5_SET(mkc, seg, a,
+		 !!(umrwr->access_flags & IB_ACCESS_REMOTE_ATOMIC));
+	MLX5_SET(mkc, seg, rw,
+		 !!(umrwr->access_flags & IB_ACCESS_REMOTE_WRITE));
+	MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ));
+	MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE));
+	MLX5_SET(mkc, seg, lr, 1);
+	MLX5_SET(mkc, seg, relaxed_ordering_write,
+		 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
+	MLX5_SET(mkc, seg, relaxed_ordering_read,
+		 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
+
 	if (umrwr->pd)
-		seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+		MLX5_SET(mkc, seg, pd, to_mpd(umrwr->pd)->pdn);
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
 	    !umrwr->length)
-		seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
+		MLX5_SET(mkc, seg, length64, 1);
 
-	seg->start_addr = cpu_to_be64(umrwr->virt_addr);
-	seg->len = cpu_to_be64(umrwr->length);
-	seg->log2_page_size = umrwr->page_shift;
-	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
-				       mlx5_mkey_variant(umrwr->mkey));
+	MLX5_SET64(mkc, seg, start_addr, umrwr->virt_addr);
+	MLX5_SET64(mkc, seg, len, umrwr->length);
+	MLX5_SET(mkc, seg, log_page_size, umrwr->page_shift);
+	MLX5_SET(mkc, seg, qpn, 0xffffff);
+	MLX5_SET(mkc, seg, mkey_7_0, mlx5_mkey_variant(umrwr->mkey));
 }
 
 static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
@@ -1224,8 +1257,7 @@ static int handle_qpt_reg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 
 	qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
 	(*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey);
-	err = set_reg_umr_segment(dev, *seg, wr,
-				  !!(MLX5_CAP_GEN(dev->mdev, atomic)));
+	err = set_reg_umr_segment(dev, *seg, wr);
 	if (unlikely(err))
 		goto out;
 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
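
The MLX5_SET conversions above write fields into the PRM-defined mkc layout
instead of poking struct members. Conceptually, MLX5_SET read-modify-writes
the big-endian 32-bit dword that holds the named field; a standalone model of
that idea (simplified; the real macro derives each field's offset and width
from the mlx5_ifc definitions, and this sketch assumes a field never crosses
a dword boundary):

	#include <stdint.h>
	#include <arpa/inet.h>	/* ntohl/htonl */

	/* Set a 'width'-bit field at big-endian bit offset 'bit_off' in an
	 * object laid out as consecutive big-endian 32-bit dwords. */
	static void prm_set(void *base, unsigned int bit_off,
			    unsigned int width, uint32_t val)
	{
		uint32_t *dw = (uint32_t *)base + bit_off / 32;
		unsigned int shift = 32 - bit_off % 32 - width;
		uint32_t mask = (width == 32 ? ~0U : ((1U << width) - 1)) << shift;
		uint32_t host = ntohl(*dw);

		host = (host & ~mask) | ((val << shift) & mask);
		*dw = htonl(host);
	}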
+9 −2
@@ -299,11 +299,18 @@ void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
 }
 EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
 
-void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
+void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
 {
 	int i;
 
+	WARN_ON(perm & 0xfc);
 	for (i = 0; i < buf->npages; i++)
-		pas[i] = cpu_to_be64(buf->frags[i].map);
+		pas[i] = cpu_to_be64(buf->frags[i].map | perm);
 }
+EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array_perm);
+
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
+{
+	mlx5_fill_page_frag_array_perm(buf, pas, 0);
+}
 EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);
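
mlx5_fill_page_frag_array() keeps its old behavior by delegating with
perm = 0, while the new _perm variant ORs permission bits into each 64-bit
PAS entry; since the page addresses are page aligned, their low bits are free
to carry flags, and the WARN_ON(perm & 0xfc) enforces that only bits [1:0]
are used. A hypothetical call site (names assumed, not taken from this
patch):

	/* Fill the PAS array for a fragmented buffer, tagging every
	 * page-aligned address with the same 2-bit permission value. */
	static void fill_pas_with_perm(struct mlx5_frag_buf *buf, __be64 *pas,
				       u8 perm)
	{
		mlx5_fill_page_frag_array_perm(buf, pas, perm & 0x3);
	}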
+1 −0
@@ -1598,6 +1598,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
 static bool counter_is_valid(u32 action)
 {
 	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
+			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
 			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
 }
 
+5 −4
@@ -276,7 +276,9 @@ enum {
 	MLX5_MKEY_MASK_RW		= 1ull << 20,
 	MLX5_MKEY_MASK_A		= 1ull << 21,
 	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
-	MLX5_MKEY_MASK_FREE		= 1ull << 29,
+	MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE	= 1ull << 25,
+	MLX5_MKEY_MASK_FREE			= 1ull << 29,
+	MLX5_MKEY_MASK_RELAXED_ORDERING_READ	= 1ull << 47,
 };
 
 enum {
@@ -1007,7 +1009,6 @@ enum {
 	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
 	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
 	MLX5_MKEY_BSF_EN	= 1 << 30,
-	MLX5_MKEY_LEN64		= 1 << 31,
 };
 
 struct mlx5_mkey_seg {
@@ -1361,11 +1362,11 @@ enum mlx5_qcam_feature_groups {
 	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
 
 #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
-	MLX5_GET(device_virtio_emulation_cap, \
+	MLX5_GET(virtio_emulation_cap, \
 		(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
 
 #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
-	MLX5_GET64(device_virtio_emulation_cap, \
+	MLX5_GET64(virtio_emulation_cap, \
 		(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
 
 #define MLX5_CAP_IPSEC(mdev, cap)\