Commit 00815752 authored by Moni Shoua, committed by Doug Ledford
Browse files

IB/mlx5: Report and handle ODP support properly



ODP depends on several device capabilities, among them the ability
to send UMR WQEs that modify the atomic access mode and entity size of the MR.
Therefore, the driver can report that ODP is supported only if all
conditions to send such a UMR WQE are met. Use this check of conditions
in all places where the driver needs to know about ODP support.

Also, implicit ODP support depends on the ability of the driver to send UMR
WQEs for an indirect mkey. Therefore, verify that all conditions to do so are
met when reporting support.

Fixes: c8d75a98 ("IB/mlx5: Respect new UMR capabilities")
Signed-off-by: Moni Shoua <monis@mellanox.com>
Reviewed-by: Guy Levi <guyle@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Link: https://lore.kernel.org/r/20190815083834.9245-7-leon@kernel.org


Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 0e6613b4
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		if (MLX5_CAP_GEN(mdev, pg))
		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
		props->odp_caps = dev->odp_caps;
	}
@@ -6139,6 +6139,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
	}

	mlx5_ib_internal_fill_odp_caps(dev);

	err = mlx5_ib_init_multiport_master(dev);
	if (err)
		return err;
@@ -6563,8 +6565,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)

static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
{
	mlx5_ib_internal_fill_odp_caps(dev);

	return mlx5_ib_odp_init_one(dev);
}

+9 −8
Original line number Diff line number Diff line
@@ -301,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)

	memset(caps, 0, sizeof(*caps));

	if (!MLX5_CAP_GEN(dev->mdev, pg))
	if (!MLX5_CAP_GEN(dev->mdev, pg) ||
	    !mlx5_ib_can_use_umr(dev, true))
		return;

	caps->general_caps = IB_ODP_SUPPORT;
@@ -355,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)

	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;

	return;
@@ -1622,7 +1624,9 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
	int ret = 0;

	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
		return ret;

	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);

	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
@@ -1633,9 +1637,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
		}
	}

	if (!MLX5_CAP_GEN(dev->mdev, pg))
		return ret;

	ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);

	return ret;
@@ -1643,7 +1644,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)

void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
{
	if (!MLX5_CAP_GEN(dev->mdev, pg))
	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
		return;

	mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);