Commit 66846b7d authored by David S. Miller

Merge branch 'bnxt_en-Driver-update-for-net-next'



Michael Chan says:

====================
bnxt_en: Driver update for net-next.

This patchset implements ethtool -X to set up a user-defined RSS indirection
table.  The new infrastructure also allows the proper logical ring index
to be used to populate the RSS indirection table when queried by ethtool -x.
Prior to these patches, we were incorrectly populating the output of
ethtool -x with internal ring IDs, which made no sense to the user.
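When no user table is configured, bnxt_set_dflt_rss_indir_tbl() (added below)
fills the logical table with the kernel's standard round-robin spread,
ethtool_rxfh_indir_default().  A minimal standalone sketch of that default
mapping, with an illustrative 128-entry table (HW_HASH_INDEX_SIZE on pre-P5
chips) and 4 RX rings:

	#include <stdio.h>

	/* Mirrors ethtool_rxfh_indir_default() from include/linux/ethtool.h:
	 * spread indirection-table entries across RX rings round-robin.
	 */
	static unsigned int rxfh_indir_default(unsigned int index,
					       unsigned int n_rx_rings)
	{
		return index % n_rx_rings;
	}

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < 128; i++)	/* entry i -> ring i % 4 */
			printf("indir[%u] = ring %u\n", i,
			       rxfh_indir_default(i, 4));
		return 0;
	}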

The last two patches clean up the VLAN acceleration logic and check the
firmware capabilities before allowing VLAN acceleration offloads.
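The capability-gating pattern, condensed from the hunks below (detection in
__bnxt_hwrm_func_qcaps() and bnxt_hwrm_vnic_qcaps(), advertisement in
bnxt_init_one()); an excerpt, not a complete listing:

	/* Detect: firmware has not disabled TX VLAN insertion. */
	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;

	/* Advertise: expose the netdev features only when the cap is set. */
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;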

v4: Move bnxt_get_rxfh_indir_size() fix to a new patch #2.
    Modify patch #7 to revert RSS map to default only when necessary.

v3: Use ALIGN() in patch 5.
    Add warning messages in patch 6.

v2: Some RSS indirection table changes requested by Jakub Kicinski.
====================

Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e80a07b2 1da63ddd
drivers/net/ethernet/broadcom/bnxt/bnxt.c  +178 −64
@@ -1614,7 +1614,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
-	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
@@ -1832,7 +1832,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
-	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
@@ -3538,7 +3538,7 @@ static void bnxt_free_vnic_attributes(struct bnxt *bp)
		}

		if (vnic->rss_table) {
-			dma_free_coherent(&pdev->dev, PAGE_SIZE,
+			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
			vnic->rss_table = NULL;
@@ -3603,7 +3603,13 @@ vnic_skip_grps:
			continue;

		/* Allocate rss table and hash key */
-		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+		if (bp->flags & BNXT_FLAG_CHIP_P5)
+			size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
+
+		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
+						     vnic->rss_table_size,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table) {
@@ -3611,8 +3617,6 @@ vnic_skip_grps:
			goto out;
		}

-		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
-
		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
@@ -4826,36 +4830,124 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
	}
}

-static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
{
-	u32 i, j, max_rings;
-	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
-	struct hwrm_vnic_rss_cfg_input req = {0};
+	int entries;

-	if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
-	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
+	if (bp->flags & BNXT_FLAG_CHIP_P5)
+		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
+	else
+		entries = HW_HASH_INDEX_SIZE;
+
+	bp->rss_indir_tbl_entries = entries;
+	bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
+					  GFP_KERNEL);
+	if (!bp->rss_indir_tbl)
+		return -ENOMEM;
+	return 0;
+}
+
+static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
+{
+	u16 max_rings, max_entries, pad, i;
+
+	if (!bp->rx_nr_rings)
+		return;

-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
-	if (set_rss) {
-		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
-		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
-		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
-		} else {
-			max_rings = 1;
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+		max_rings = bp->rx_nr_rings - 1;
+	else
+		max_rings = bp->rx_nr_rings;

+	max_entries = bnxt_get_rxfh_indir_size(bp->dev);
+
+	for (i = 0; i < max_entries; i++)
+		bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
+
+	pad = bp->rss_indir_tbl_entries - max_entries;
+	if (pad)
+		memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+}
+
+static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
+{
+	u16 i, tbl_size, max_ring = 0;
+
+	if (!bp->rss_indir_tbl)
+		return 0;
+
+	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+	for (i = 0; i < tbl_size; i++)
+		max_ring = max(max_ring, bp->rss_indir_tbl[i]);
+	return max_ring;
+}
+
+int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
+{
+	if (bp->flags & BNXT_FLAG_CHIP_P5)
+		return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+		return 2;
+	return 1;
+}
+
+static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
+	u16 i, j;
+
	/* Fill the RSS indirection table with ring group ids */
-		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
-			if (j == max_rings)
-				j = 0;
+	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
+		if (!no_rss)
+			j = bp->rss_indir_tbl[i];
		vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
	}
}

+static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
+				      struct bnxt_vnic_info *vnic)
+{
+	__le16 *ring_tbl = vnic->rss_table;
+	struct bnxt_rx_ring_info *rxr;
+	u16 tbl_size, i;
+
+	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+
+	for (i = 0; i < tbl_size; i++) {
+		u16 ring_id, j;
+
+		j = bp->rss_indir_tbl[i];
+		rxr = &bp->rx_ring[j];
+
+		ring_id = rxr->rx_ring_struct.fw_ring_id;
+		*ring_tbl++ = cpu_to_le16(ring_id);
+		ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+		*ring_tbl++ = cpu_to_le16(ring_id);
+	}
+}
+
+static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	if (bp->flags & BNXT_FLAG_CHIP_P5)
+		__bnxt_fill_hw_rss_tbl_p5(bp, vnic);
+	else
+		__bnxt_fill_hw_rss_tbl(bp, vnic);
+}
+
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+{
+	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+	struct hwrm_vnic_rss_cfg_input req = {0};
+
+	if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
+	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
+	if (set_rss) {
+		bnxt_fill_hw_rss_tbl(bp, vnic);
+		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
+		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
		req.hash_key_tbl_addr =
			cpu_to_le64(vnic->rss_hash_key_dma_addr);
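
Worth noting in the hunk above: the series splits RSS state into two levels.
bp->rss_indir_tbl is the logical table of ring indices 0..rx_nr_rings-1 (the
view that ethtool -x/-X operates on), while vnic->rss_table remains the
DMA-mapped table the firmware consumes; bnxt_fill_hw_rss_tbl() translates the
former into the latter, using ring-group IDs on older chips and (RX ring ID,
completion ring ID) pairs per entry on P5 chips.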
@@ -4867,9 +4959,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
-	u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
-	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct hwrm_vnic_rss_cfg_input req = {0};
+	dma_addr_t ring_tbl_map;
+	u32 i, nr_ctxs;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -4877,31 +4969,18 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
		hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		return 0;
	}
+	bnxt_fill_hw_rss_tbl(bp, vnic);
	req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
	req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
-	req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
-	nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
-	for (i = 0, k = 0; i < nr_ctxs; i++) {
-		__le16 *ring_tbl = vnic->rss_table;
+	ring_tbl_map = vnic->rss_table_dma_addr;
+	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
+	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
		int rc;

+		req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
		req.ring_table_pair_index = i;
		req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
-		for (j = 0; j < 64; j++) {
-			u16 ring_id;
-
-			ring_id = rxr->rx_ring_struct.fw_ring_id;
-			*ring_tbl++ = cpu_to_le16(ring_id);
-			ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-			*ring_tbl++ = cpu_to_le16(ring_id);
-			rxr++;
-			k++;
-			if (k == max_rings) {
-				k = 0;
-				rxr = &bp->rx_ring[0];
-			}
-		}
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
@@ -5139,6 +5218,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
		if (flags &
		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;

+		/* Older P5 fw before EXT_HW_STATS support did not set
+		 * VLAN_STRIP_CAP properly.
+		 */
+		if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
+		    ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+		     !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
+			bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
		if (bp->max_tpa_v2)
			bp->hw_ring_stats_size =
@@ -5992,6 +6079,21 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
		rx = rx_rings << 1;
	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
	bp->tx_nr_rings = tx;

+	/* If we cannot reserve all the RX rings, reset the RSS map only
+	 * if absolutely necessary
+	 */
+	if (rx_rings != bp->rx_nr_rings) {
+		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
+			    rx_rings, bp->rx_nr_rings);
+		if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
+		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
+		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
+		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
+			netdev_warn(bp->dev, "RSS table entries reverting to default\n");
+			bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
+		}
+	}
	bp->rx_nr_rings = rx_rings;
	bp->cp_nr_rings = cp;

@@ -6955,7 +7057,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-	u32 flags;
+	u32 flags, flags_ext;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);
@@ -6980,6 +7082,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
+		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
+
+	flags_ext = le32_to_cpu(resp->flags_ext);
+	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
+		bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;

	bp->tx_push_thresh = 0;
	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -7634,7 +7742,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
{
	int rc, i, nr_ctxs;

-	nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
+	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
	for (i = 0; i < nr_ctxs; i++) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
		if (rc) {
@@ -8196,6 +8304,9 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
			rc = bnxt_init_int_mode(bp);
		bnxt_ulp_irq_restart(bp, rc);
	}
+	if (!netif_is_rxfh_configured(bp->dev))
+		bnxt_set_dflt_rss_indir_tbl(bp);
+
	if (rc) {
		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
		return rc;
@@ -9835,24 +9946,16 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
	 * turned on or off together.
	 */
-	vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
-				    NETIF_F_HW_VLAN_STAG_RX);
-	if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
-			      NETIF_F_HW_VLAN_STAG_RX)) {
-		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
-			features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
-				      NETIF_F_HW_VLAN_STAG_RX);
+	vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
+	if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
+		if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
+			features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
		else if (vlan_features)
-			features |= NETIF_F_HW_VLAN_CTAG_RX |
-				    NETIF_F_HW_VLAN_STAG_RX;
+			features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
	}
#ifdef CONFIG_BNXT_SRIOV
-	if (BNXT_VF(bp)) {
-		if (bp->vf.vlan) {
-			features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
-				      NETIF_F_HW_VLAN_STAG_RX);
-		}
-	}
+	if (BNXT_VF(bp) && bp->vf.vlan)
+		features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
#endif
	return features;
}
@@ -9875,7 +9978,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
		flags &= ~BNXT_FLAG_TPA;

-	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+	if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
		flags |= BNXT_FLAG_STRIP_VLAN;

	if (features & NETIF_F_NTUPLE)
@@ -11510,6 +11613,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
+	kfree(bp->rss_indir_tbl);
+	bp->rss_indir_tbl = NULL;
	bnxt_free_port_stats(bp);
	free_netdev(dev);
}
@@ -11961,8 +12066,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
-	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
-			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
+		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
+	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
+		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
@@ -12018,7 +12125,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

	bnxt_fw_init_one_p3(bp);

-	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
@@ -12030,6 +12137,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

+	rc = bnxt_alloc_rss_indir_tbl(bp);
+	if (rc)
+		goto init_err_pci_clean;
+	bnxt_set_dflt_rss_indir_tbl(bp);
+
	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
@@ -12074,6 +12186,8 @@ init_err_pci_clean:
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
+	kfree(bp->rss_indir_tbl);
+	bp->rss_indir_tbl = NULL;

init_err_free:
	free_netdev(dev);
drivers/net/ethernet/broadcom/bnxt/bnxt.h  +20 −0
@@ -1017,6 +1017,15 @@ struct bnxt_vnic_info {
	__le16		*rss_table;
	dma_addr_t	rss_hash_key_dma_addr;
	u64		*rss_hash_key;
+	int		rss_table_size;
+#define BNXT_RSS_TABLE_ENTRIES_P5	64
+#define BNXT_RSS_TABLE_SIZE_P5		(BNXT_RSS_TABLE_ENTRIES_P5 * 4)
+#define BNXT_RSS_TABLE_MAX_TBL_P5	8
+#define BNXT_MAX_RSS_TABLE_SIZE_P5				\
+	(BNXT_RSS_TABLE_SIZE_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
+#define BNXT_MAX_RSS_TABLE_ENTRIES_P5				\
+	(BNXT_RSS_TABLE_ENTRIES_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
+
	u32		rx_mask;

	u8		*mc_list;
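
The sizing above works out as follows (cross-checked against
__bnxt_fill_hw_rss_tbl_p5() in bnxt.c): each P5 table entry is a pair of
16-bit ring IDs (the RX ring and its completion ring), i.e. 4 bytes per
entry, so one 64-entry RSS context occupies 64 * 4 = 256 bytes, and the
8-context maximum gives 2048 bytes and 512 logical entries, the limits used
to size vnic->rss_table and bp->rss_indir_tbl.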
@@ -1648,6 +1657,8 @@ struct bnxt {
	struct bnxt_ring_grp_info	*grp_info;
	struct bnxt_vnic_info	*vnic_info;
	int			nr_vnics;
+	u16			*rss_indir_tbl;
+	u16			rss_indir_tbl_entries;
	u32			rss_hash_cfg;

	u16			max_mtu;
@@ -1705,6 +1716,9 @@ struct bnxt {
	#define BNXT_FW_CAP_ERR_RECOVER_RELOAD		0x00100000
	#define BNXT_FW_CAP_HOT_RESET			0x00200000
	#define BNXT_FW_CAP_SHARED_PORT_CFG		0x00400000
+	#define BNXT_FW_CAP_VLAN_RX_STRIP		0x01000000
+	#define BNXT_FW_CAP_VLAN_TX_INSERT		0x02000000
+	#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED	0x04000000

#define BNXT_NEW_RM(bp)		((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
	u32			hwrm_spec_code;
@@ -1895,6 +1909,11 @@ struct bnxt {
#define BNXT_PCIE_STATS_OFFSET(counter)			\
	(offsetof(struct pcie_ctx_hw_stats, counter) / 8)

+#define BNXT_HW_FEATURE_VLAN_ALL_RX				\
+	(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
+#define BNXT_HW_FEATURE_VLAN_ALL_TX				\
+	(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)
+
#define I2C_DEV_ADDR_A0				0xa0
#define I2C_DEV_ADDR_A2				0xa2
#define SFF_DIAG_SUPPORT_OFFSET			0x5c
@@ -2028,6 +2047,7 @@ int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
			    int bmap_size, bool async_only);
+int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  +47 −5
@@ -926,6 +926,13 @@ static int bnxt_set_channels(struct net_device *dev,
		return rc;
	}

+	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
+	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
+	    (dev->priv_flags & IFF_RXFH_CONFIGURED)) {
+		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
+		return -EINVAL;
+	}
+
	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
@@ -1273,8 +1280,12 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
	return rc;
}

-static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
+	struct bnxt *bp = netdev_priv(dev);
+
+	if (bp->flags & BNXT_FLAG_CHIP_P5)
+		return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
	return HW_HASH_INDEX_SIZE;
}

@@ -1288,7 +1299,7 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vnic_info *vnic;
-	int i = 0;
+	u32 i, tbl_size;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
@@ -1297,9 +1308,10 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
		return 0;

	vnic = &bp->vnic_info[0];
-	if (indir && vnic->rss_table) {
-		for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
-			indir[i] = le16_to_cpu(vnic->rss_table[i]);
+	if (indir && bp->rss_indir_tbl) {
+		tbl_size = bnxt_get_rxfh_indir_size(dev);
+		for (i = 0; i < tbl_size; i++)
+			indir[i] = bp->rss_indir_tbl[i];
	}

	if (key && vnic->rss_hash_key)
@@ -1308,6 +1320,35 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
	return 0;
}

+static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
+			 const u8 *key, const u8 hfunc)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	int rc = 0;
+
+	if (hfunc && hfunc != ETH_RSS_HASH_TOP)
+		return -EOPNOTSUPP;
+
+	if (key)
+		return -EOPNOTSUPP;
+
+	if (indir) {
+		u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
+
+		for (i = 0; i < tbl_size; i++)
+			bp->rss_indir_tbl[i] = indir[i];
+		pad = bp->rss_indir_tbl_entries - tbl_size;
+		if (pad)
+			memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+	}
+
+	if (netif_running(bp->dev)) {
+		bnxt_close_nic(bp, false, false);
+		rc = bnxt_open_nic(bp, false, false);
+	}
+	return rc;
+}
+
static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
@@ -3614,6 +3655,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
	.get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size      = bnxt_get_rxfh_key_size,
	.get_rxfh               = bnxt_get_rxfh,
+	.set_rxfh		= bnxt_set_rxfh,
	.flash_device		= bnxt_flash_device,
	.get_eeprom_len         = bnxt_get_eeprom_len,
	.get_eeprom             = bnxt_get_eeprom,
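
The bnxt_set_rxfh() hook added above is what ethtool -X eventually reaches.
A user-space sketch of that path through the classic SIOCETHTOOL ioctl
(ETHTOOL_GRXFHINDIR/ETHTOOL_SRXFHINDIR; the device name "eth0" and the
two-ring spread are illustrative assumptions, and recent ethtool binaries may
use the ETHTOOL_GRSSH/ETHTOOL_SRSSH commands instead):

	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_rxfh_indir get = { .cmd = ETHTOOL_GRXFHINDIR };
		struct ethtool_rxfh_indir *set;
		struct ifreq ifr = {0};
		__u32 i, size;
		int fd;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return 1;
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

		/* With size == 0, the kernel only reports the table size. */
		ifr.ifr_data = (void *)&get;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			return 1;
		size = get.size;

		/* Roughly "ethtool -X eth0 weight 1 1": alternate rings 0/1. */
		set = calloc(1, sizeof(*set) + size * sizeof(__u32));
		if (!set)
			return 1;
		set->cmd = ETHTOOL_SRXFHINDIR;
		set->size = size;
		for (i = 0; i < size; i++)
			set->ring_index[i] = i % 2;

		ifr.ifr_data = (void *)set;
		return ioctl(fd, SIOCETHTOOL, &ifr) < 0;
	}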
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h  +1 −0
@@ -86,6 +86,7 @@ struct hwrm_dbg_cmn_output {

extern const struct ethtool_ops bnxt_ethtool_ops;

+u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);