Commit c515e70d authored by Saeed Mahameed
Browse files


This merge commit includes some miscellaneous shared code updates from the
mlx5-next branch that are needed for net-next.

1) From Aya: Enable general events on all physical link types and
   restrict general event handling of subtype DELAY_DROP_TIMEOUT in mlx5 rdma
   driver to ethernet links only as it was intended.

2) From Eli: Introduce low level bits for prio tag mode

3) From Maor: Low level steering updates to support RDMA RX flow
   steering and enable RoCE loopback traffic when switchdev is enabled.

4) From Vu and Parav: Two small mlx5 core cleanups

5) From Yevgeny: Add HW definitions of GENEVE offloads

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parents 2a369ae0 91a40a48
Loading
Loading
Loading
Loading
+11 −5
Original line number Diff line number Diff line
@@ -181,7 +181,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
							  ibdev->rep->vport);
			if (rep_ndev == ndev)
				roce->netdev = ndev;
		} else if (ndev->dev.parent == &mdev->pdev->dev) {
		} else if (ndev->dev.parent == mdev->device) {
			roce->netdev = ndev;
		}
		write_unlock(&roce->netdev_lock);
@@ -4354,8 +4354,12 @@ static void delay_drop_handler(struct work_struct *work)
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
				 struct ib_event *ibev)
{
	u8 port = (eqe->data.port.port >> 4) & 0xf;

	switch (eqe->sub_type) {
	case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
					    IB_LINK_LAYER_ETHERNET)
			schedule_work(&ibdev->delay_drop.delay_drop_work);
		break;
	default: /* do nothing */
@@ -5673,7 +5677,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
			}

			if (bound) {
				dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
				dev_dbg(mpi->mdev->device,
					"removing port from unaffiliated list.\n");
				mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
				list_del(&mpi->list);
				break;
@@ -5872,7 +5877,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
	dev->ib_dev.phys_port_cnt	= dev->num_ports;
	dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
	dev->ib_dev.dev.parent		= &mdev->pdev->dev;
	dev->ib_dev.dev.parent		= mdev->device;

	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
@@ -6561,7 +6566,8 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)

	if (!bound) {
		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
		dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
		dev_dbg(mdev->device,
			"no suitable IB device found to bind to, added to unaffiliated list.\n");
	}
	mutex_unlock(&mlx5_ib_multiport_mutex);

+1 −1
Original line number Diff line number Diff line
@@ -36,7 +36,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tu
#
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH)   += eswitch.o eswitch_offloads.o ecpf.o
mlx5_core-$(CONFIG_MLX5_ESWITCH)   += eswitch.o eswitch_offloads.o ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_MPFS)      += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN)          += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
+10 −9
Original line number Diff line number Diff line
@@ -57,15 +57,16 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
					   int node)
{
	struct mlx5_priv *priv = &dev->priv;
	struct device *device = dev->device;
	int original_node;
	void *cpu_handle;

	mutex_lock(&priv->alloc_mutex);
	original_node = dev_to_node(&dev->pdev->dev);
	set_dev_node(&dev->pdev->dev, node);
	cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
	original_node = dev_to_node(device);
	set_dev_node(device, node);
	cpu_handle = dma_alloc_coherent(device, size, dma_handle,
					GFP_KERNEL);
	set_dev_node(&dev->pdev->dev, original_node);
	set_dev_node(device, original_node);
	mutex_unlock(&priv->alloc_mutex);
	return cpu_handle;
}
@@ -110,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc);

void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
	dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf,
	dma_free_coherent(dev->device, buf->size, buf->frags->buf,
			  buf->frags->map);

	kfree(buf->frags);
@@ -139,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
		if (!frag->buf)
			goto err_free_buf;
		if (frag->map & ((1 << buf->page_shift) - 1)) {
			dma_free_coherent(&dev->pdev->dev, frag_sz,
			dma_free_coherent(dev->device, frag_sz,
					  buf->frags[i].buf, buf->frags[i].map);
			mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
				       &frag->map, buf->page_shift);
@@ -152,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,

err_free_buf:
	while (i--)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
		dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
				  buf->frags[i].map);
	kfree(buf->frags);
err_out:
@@ -168,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
	for (i = 0; i < buf->npages; i++) {
		int frag_sz = min_t(int, size, PAGE_SIZE);

		dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
		dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
				  buf->frags[i].map);
		size -= frag_sz;
	}
@@ -274,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
	__set_bit(db->index, db->u.pgdir->bitmap);

	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
		dma_free_coherent(dev->device, PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		bitmap_free(db->u.pgdir->bitmap);
+4 −5
Original line number Diff line number Diff line
@@ -1347,7 +1347,7 @@ static void set_wqname(struct mlx5_core_dev *dev)
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev->priv.name);
		 dev_name(dev->device));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
@@ -1852,7 +1852,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)

static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;
	struct device *ddev = dev->device;

	cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						&cmd->alloc_dma, GFP_KERNEL);
@@ -1883,7 +1883,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;
	struct device *ddev = dev->device;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
@@ -1908,8 +1908,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
		return -EINVAL;
	}

	cmd->pool = dma_pool_create("mlx5_cmd", &dev->pdev->dev, size, align,
				    0);
	cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

+3 −2
Original line number Diff line number Diff line
@@ -47,7 +47,7 @@ TRACE_EVENT(mlx5_fw,
	TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),

	TP_STRUCT__entry(
		__string(dev_name, tracer->dev->priv.name)
		__string(dev_name, dev_name(tracer->dev->device))
		__field(u64, trace_timestamp)
		__field(bool, lost)
		__field(u8, event_id)
@@ -55,7 +55,8 @@ TRACE_EVENT(mlx5_fw,
	),

	TP_fast_assign(
		__assign_str(dev_name, tracer->dev->priv.name);
		__assign_str(dev_name,
			     dev_name(tracer->dev->device));
		__entry->trace_timestamp = trace_timestamp;
		__entry->lost = lost;
		__entry->event_id = event_id;
Loading