Commit a477605f authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'dpaa2-eth-add-PFC-support'



Ioana Ciornei says:

====================
dpaa2-eth: add PFC support

This patch set adds support for Priority Flow Control in DPAA2 Ethernet
devices.

The first patch makes the necessary changes so that multiple
traffic classes are configured. The dequeue priority
of the maximum 8 traffic classes is configured to be equal.
The second patch adds a static distribution to said traffic
classes based on the VLAN PCP field. In the future, this could be
extended through the .setapp() DCB callback for dynamic configuration.

Also, add support for the congestion group taildrop mechanism that
allows us to control the number of frames that can accumulate on a group
of Rx frame queues belonging to the same traffic class.

The basic subset of the DCB ops is implemented so that the user can
query the number of PFC capable traffic classes, their state and
reconfigure them if necessary.

Changes in v3:
 - add patches 6-7 which add the PFC functionality
 - patch 2/7: revert to explicitly cast mask to u16 * to not get into
   sparse warnings
Changes in v4:
 - really fix the sparse warnings in 2/7
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 3190ca3b 07beb165
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -9,6 +9,16 @@ config FSL_DPAA2_ETH
	  The driver manages network objects discovered on the Freescale
	  MC bus.

if FSL_DPAA2_ETH
config FSL_DPAA2_ETH_DCB
	bool "Data Center Bridging (DCB) Support"
	default n
	depends on DCB
	help
	  Enable Priority-Based Flow Control (PFC) support for DPAA2 Ethernet
	  devices.
endif

config FSL_DPAA2_PTP_CLOCK
	tristate "Freescale DPAA2 PTP Clock"
	depends on FSL_DPAA2_ETH && PTP_1588_CLOCK_QORIQ
+1 −0
Original line number Diff line number Diff line
@@ -7,6 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK)	+= fsl-dpaa2-ptp.o

fsl-dpaa2-eth-objs	:= dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o
fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs	:= dpaa2-ptp.o dprtc.o

+150 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2020 NXP */

#include "dpaa2-eth.h"

static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
				       struct ieee_pfc *pfc)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	/* Nothing to report while PFC pause is disabled on the link */
	if (!(priv->link_state.options & DPNI_LINK_OPT_PFC_PAUSE))
		return 0;

	/* Hand back the cached PFC state, then fill in the capability
	 * field with the number of available traffic classes
	 */
	*pfc = priv->pfc;
	pfc->pfc_cap = dpaa2_eth_tc_count(priv);

	return 0;
}

/* Bit tc of the PFC enable mask selects traffic class tc */
static inline bool is_prio_enabled(u8 pfc_en, u8 tc)
{
	return (pfc_en >> tc) & 1;
}

/* Program per-TC congestion notification so that PFC frames are generated
 * only for the traffic classes set in pfc_en
 */
static int set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
{
	struct dpni_congestion_notification_cfg cfg = {0};
	int err, tc;

	cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
	cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
	cfg.message_iova = 0ULL;
	cfg.message_ctx = 0ULL;

	for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
		/* Traffic classes absent from the pfc_en mask keep zero
		 * congestion thresholds, which effectively disables
		 * generation of PFC frames for them
		 */
		bool enabled = is_prio_enabled(pfc_en, tc);

		cfg.threshold_entry =
			enabled ? DPAA2_ETH_CN_THRESH_ENTRY(priv) : 0;
		cfg.threshold_exit =
			enabled ? DPAA2_ETH_CN_THRESH_EXIT(priv) : 0;

		err = dpni_set_congestion_notification(priv->mc_io, 0,
						       priv->mc_token,
						       DPNI_QUEUE_RX, tc, &cfg);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_congestion_notification failed\n");
			return err;
		}
	}

	return 0;
}

/* Apply a new IEEE PFC configuration: update the link PFC-pause option,
 * reprogram per-TC congestion notifications and re-evaluate the Rx
 * taildrop setup. Only the pfc_en bitmap is configurable; MBC and delay
 * are not supported by the hardware setup visible here.
 */
static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
				       struct ieee_pfc *pfc)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_link_cfg link_cfg = {0};
	bool tx_pause;
	int err;

	if (pfc->mbc || pfc->delay)
		return -EOPNOTSUPP;

	/* If same PFC enabled mask, nothing to do */
	if (priv->pfc.pfc_en == pfc->pfc_en)
		return 0;

	/* We allow PFC configuration even if it won't have any effect until
	 * general pause frames are enabled
	 */
	tx_pause = dpaa2_eth_tx_pause_enabled(priv->link_state.options);
	if (!dpaa2_eth_rx_pause_enabled(priv->link_state.options) || !tx_pause)
		netdev_warn(net_dev, "Pause support must be enabled in order for PFC to work!\n");

	/* Reflect the requested PFC state in the link options sent to the
	 * MC firmware; rate and the remaining options are kept as-is
	 */
	link_cfg.rate = priv->link_state.rate;
	link_cfg.options = priv->link_state.options;
	if (pfc->pfc_en)
		link_cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
	else
		link_cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
	if (err) {
		netdev_err(net_dev, "dpni_set_link_cfg failed\n");
		return err;
	}

	/* Configure congestion notifications for the enabled priorities */
	err = set_pfc_cn(priv, pfc->pfc_en);
	if (err)
		return err;

	/* Cache the new configuration only after the hardware accepted it */
	memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
	priv->pfc_enabled = !!pfc->pfc_en;

	/* PFC state influences which Rx taildrop mechanism is used */
	dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);

	return 0;
}

static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *eth_priv = netdev_priv(net_dev);

	/* Report the currently configured DCBX mode */
	return eth_priv->dcbx_mode;
}

static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
{
	struct dpaa2_eth_priv *eth_priv = netdev_priv(net_dev);

	/* Only the already-configured DCBX mode is accepted; a non-zero
	 * return rejects any attempt to change it
	 */
	return mode != eth_priv->dcbx_mode;
}

/* Report DCB capabilities: PFC is supported, with as many PFC-capable
 * traffic classes as the DPNI exposes
 */
static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (capid == DCB_CAP_ATTR_PFC)
		*cap = true;
	else if (capid == DCB_CAP_ATTR_PFC_TCS)
		*cap = 1 << (dpaa2_eth_tc_count(priv) - 1);
	else if (capid == DCB_CAP_ATTR_DCBX)
		*cap = priv->dcbx_mode;
	else
		*cap = false;

	return 0;
}

/* Basic subset of DCB ops supported by this driver: IEEE PFC get/set plus
 * DCBX mode and capability queries
 */
const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
	.ieee_getpfc	= dpaa2_eth_dcbnl_ieee_getpfc,
	.ieee_setpfc	= dpaa2_eth_dcbnl_ieee_setpfc,
	.getdcbx	= dpaa2_eth_dcbnl_getdcbx,
	.setdcbx	= dpaa2_eth_dcbnl_setdcbx,
	.getcap		= dpaa2_eth_dcbnl_getcap,
};
+4 −3
Original line number Diff line number Diff line
@@ -81,8 +81,8 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
	int i, err;

	seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
	seq_printf(file, "%s%16s%16s%16s%16s\n",
		   "VFQID", "CPU", "Type", "Frames", "Pending frames");
	seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
		   "VFQID", "CPU", "TC", "Type", "Frames", "Pending frames");

	for (i = 0; i <  priv->num_fqs; i++) {
		fq = &priv->fq[i];
@@ -90,9 +90,10 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
		if (err)
			fcnt = 0;

		seq_printf(file, "%5d%16d%16s%16llu%16u\n",
		seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
			   fq->fqid,
			   fq->target_cpu,
			   fq->tc,
			   fq_type_to_str(fq),
			   fq->stats.frames,
			   fcnt);
+217 −33
Original line number Diff line number Diff line
@@ -1287,31 +1287,67 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
	}
}

static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
			       bool tx_pause, bool pfc)
{
	struct dpni_taildrop td = {0};
	struct dpaa2_eth_fq *fq;
	int i, err;

	if (priv->rx_td_enabled == enable)
		return;
	/* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
	 * flow control is disabled (as it might interfere with either the
	 * buffer pool depletion trigger for pause frames or with the group
	 * congestion trigger for PFC frames)
	 */
	td.enable = !tx_pause;
	if (priv->rx_fqtd_enabled == td.enable)
		goto set_cgtd;

	td.enable = enable;
	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
	td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
	td.units = DPNI_CONGESTION_UNIT_BYTES;

	for (i = 0; i < priv->num_fqs; i++) {
		if (priv->fq[i].type != DPAA2_RX_FQ)
		fq = &priv->fq[i];
		if (fq->type != DPAA2_RX_FQ)
			continue;
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
					priv->fq[i].flowid, &td);
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					fq->tc, fq->flowid, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop() failed\n");
			break;
				   "dpni_set_taildrop(FQ) failed\n");
			return;
		}
	}

	priv->rx_fqtd_enabled = td.enable;

set_cgtd:
	/* Congestion group taildrop: threshold is in frames, per group
	 * of FQs belonging to the same traffic class
	 * Enabled if general Tx pause disabled or if PFCs are enabled
	 * (congestion group threshold for PFC generation is lower than the
	 * CG taildrop threshold, so it won't interfere with it; we also
	 * want frames in non-PFC enabled traffic classes to be kept in check)
	 */
	td.enable = !tx_pause || (tx_pause && pfc);
	if (priv->rx_cgtd_enabled == td.enable)
		return;

	td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
	td.units = DPNI_CONGESTION_UNIT_FRAMES;
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_GROUP, DPNI_QUEUE_RX,
					i, 0, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(CG) failed\n");
			return;
		}
	}

	priv->rx_td_enabled = enable;
	priv->rx_cgtd_enabled = td.enable;
}

static int link_state_update(struct dpaa2_eth_priv *priv)
@@ -1331,9 +1367,8 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
	 * Rx FQ taildrop configuration as well. We configure taildrop
	 * only when pause frame generation is disabled.
	 */
	tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
		   !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
	dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
	tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
	dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
@@ -2407,7 +2442,7 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)

static void setup_fqs(struct dpaa2_eth_priv *priv)
{
	int i;
	int i, j;

	/* We have one TxConf FQ per Tx flow.
	 * The number of Tx and Rx queues is the same.
@@ -2419,11 +2454,14 @@ static void setup_fqs(struct dpaa2_eth_priv *priv)
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
		for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
			priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
			priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
			priv->fq[priv->num_fqs].tc = (u8)j;
			priv->fq[priv->num_fqs++].flowid = (u16)i;
		}
	}

	/* For each FQ, decide on which core to process incoming frames */
	set_fq_affinity(priv);
@@ -2691,6 +2729,118 @@ out_err:
	priv->enqueue = dpaa2_eth_enqueue_qd;
}

/* Configure ingress classification based on VLAN PCP.
 * Builds a key-generation profile that extracts the VLAN TCI, installs a
 * QoS table with TC 0 as default, then adds one entry per traffic class
 * mapping a PCP value (masked out of the TCI) to that class.
 * Returns 0 on success, -EOPNOTSUPP when the DPNI cannot support it, or a
 * negative error code on failure.
 */
static int set_vlan_qos(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpkg_profile_cfg kg_cfg = {0};
	struct dpni_qos_tbl_cfg qos_cfg = {0};
	struct dpni_rule_cfg key_params;
	void *dma_mem, *key, *mask;
	u8 key_size = 2;	/* VLAN TCI field */
	int i, pcp, err;

	/* VLAN-based classification only makes sense if we have multiple
	 * traffic classes.
	 * Also, we need to extract just the 3-bit PCP field from the VLAN
	 * header and we can only do that by using a mask
	 */
	if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
		dev_dbg(dev, "VLAN-based QoS classification not supported\n");
		return -EOPNOTSUPP;
	}

	/* Scratch buffer for the serialized key-generation profile */
	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	/* Single extract: the full VLAN TCI field (PCP is isolated later
	 * via the per-entry mask)
	 */
	kg_cfg.num_extracts = 1;
	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;

	err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg failed\n");
		goto out_free_tbl;
	}

	/* set QoS table */
	qos_cfg.default_tc = 0;
	qos_cfg.discard_on_miss = 0;
	qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
					      DPAA2_CLASSIFIER_DMA_SIZE,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
		dev_err(dev, "QoS table DMA mapping failed\n");
		err = -ENOMEM;
		goto out_free_tbl;
	}

	err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
	if (err) {
		dev_err(dev, "dpni_set_qos_table failed\n");
		goto out_unmap_tbl;
	}

	/* Add QoS table entries */
	/* key and mask live back-to-back in one buffer: [key | mask] */
	key = kzalloc(key_size * 2, GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto out_unmap_tbl;
	}
	mask = key + key_size;
	/* Only the 3 PCP bits of the TCI participate in the match */
	*(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);

	key_params.key_iova = dma_map_single(dev, key, key_size * 2,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_params.key_iova)) {
		dev_err(dev, "Qos table entry DMA mapping failed\n");
		err = -ENOMEM;
		goto out_free_key;
	}

	/* Mask follows the key in the same DMA mapping */
	key_params.mask_iova = key_params.key_iova + key_size;
	key_params.key_size = key_size;

	/* We add rules for PCP-based distribution starting with highest
	 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
	 * classes to accommodate all priority levels, the lowest ones end up
	 * on TC 0 which was configured as default
	 */
	for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
		*(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
		/* The CPU rewrote the key in place; push it to device memory
		 * before the firmware reads it
		 */
		dma_sync_single_for_device(dev, key_params.key_iova,
					   key_size * 2, DMA_TO_DEVICE);

		err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
					 &key_params, i, i);
		if (err) {
			dev_err(dev, "dpni_add_qos_entry failed\n");
			/* Roll back any entries added so far */
			dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
			goto out_unmap_key;
		}
	}

	priv->vlan_cls_enabled = true;

	/* Table and key memory is not persistent, clean everything up after
	 * configuration is finished
	 */
out_unmap_key:
	dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
out_free_key:
	kfree(key);
out_unmap_tbl:
	dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
out_free_tbl:
	kfree(dma_mem);

	return err;
}

/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
@@ -2753,6 +2903,10 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
			goto close;
	}

	err = set_vlan_qos(priv);
	if (err && err != -EOPNOTSUPP)
		goto close;

	priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
				       dpaa2_eth_fs_count(priv), GFP_KERNEL);
	if (!priv->cls_rules) {
@@ -2789,7 +2943,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
			     DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(RX) failed\n");
		return err;
@@ -2802,7 +2956,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
	queue.destination.priority = 1;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, 0, fq->flowid,
			     DPNI_QUEUE_RX, fq->tc, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
@@ -2811,6 +2965,10 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
	}

	/* xdp_rxq setup */
	/* only once for each channel */
	if (fq->tc > 0)
		return 0;

	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
			       fq->flowid);
	if (err) {
@@ -2948,7 +3106,7 @@ static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_tc_dist_cfg dist_cfg;
	int err;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

@@ -2956,9 +3114,14 @@ static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
	if (err)
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
					  i, &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_tc_dist failed\n");
			break;
		}
	}

	return err;
}
@@ -2968,7 +3131,7 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int err;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

@@ -2976,9 +3139,15 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		dist_cfg.tc = i;
		err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
					    &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_hash_dist failed\n");
			break;
		}
	}

	return err;
}
@@ -2988,7 +3157,7 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int err;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

@@ -2996,9 +3165,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		dist_cfg.tc = i;
		err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
					  &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_fs_dist failed\n");
			break;
		}
	}

	return err;
}
@@ -3684,6 +3859,15 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
	if (err)
		goto err_alloc_rings;

#ifdef CONFIG_FSL_DPAA2_ETH_DCB
	if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
		priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
		net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
	} else {
		dev_dbg(dev, "PFC not supported\n");
	}
#endif

	err = setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
Loading