Commit 48423dd7 authored by Jakub Kicinski

Merge branch 'hns3-next' into net-next



Huazhong Tan says:

====================
This patch set adds several new features to the HNS3 ethernet
controller driver.

[patch 01/06] adds support for configuring VF link status on the host.

[patch 02/06] adds support for configuring VF spoof check.

[patch 03/06] adds support for configuring VF trust.

[patch 04/06] adds support for configuring VF bandwidth on the host.

[patch 05/06] adds support for configuring VF MAC on the host.

[patch 06/06] adds support for the tx-scatter-gather-fraglist feature.
====================

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
parents 11fc7d5a 8ae10cfb
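
For orientation: patches 1-5 surface through the kernel's generic ndo_get_vf_config / ndo_set_vf_* hooks (wired up in hns3_enet.c below), so no new UAPI is involved. On the host the knobs are exercised with the usual iproute2 commands, e.g. "ip link set <pf> vf 0 mac <addr>", "... vf 0 spoofchk on", "... vf 0 trust on", "... vf 0 state auto" and "... vf 0 max_tx_rate <Mbps>", and "ip link show <pf>" reads the per-VF configuration back. Device name and VF index here are placeholders.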
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h (+1 −0)
@@ -45,6 +45,7 @@ enum HCLGE_MBX_OPCODE {
 	HCLGE_MBX_GET_LINK_MODE,	/* (VF -> PF) get the link mode of pf */
 	HCLGE_MBX_PUSH_VLAN_INFO,	/* (PF -> VF) push port base vlan */
 	HCLGE_MBX_GET_MEDIA_TYPE,       /* (VF -> PF) get media type */
+	HCLGE_MBX_PUSH_PROMISC_INFO,	/* (PF -> VF) push vf promisc info */
 
 	HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
 	HCLGE_MBX_PUSH_LINK_STATUS,	/* (M7 -> PF) get port link status */
drivers/net/ethernet/hisilicon/hns3/hnae3.h (+23 −0)
@@ -364,6 +364,19 @@ struct hnae3_ae_dev {
  *   Enable/disable HW GRO
  * add_arfs_entry
  *   Check the 5-tuples of flow, and create flow director rule
+ * get_vf_config
+ *   Get the VF configuration setting by the host
+ * set_vf_link_state
+ *   Set VF link status
+ * set_vf_spoofchk
+ *   Enable/disable spoof check for specified vf
+ * set_vf_trust
+ *   Enable/disable trust for specified vf, if the vf being trusted, then
+ *   it can enable promisc mode
+ * set_vf_rate
+ *   Set the max tx rate of specified vf.
+ * set_vf_mac
+ *   Configure the default MAC for specified VF
  */
 struct hnae3_ae_ops {
 	int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -529,6 +542,16 @@ struct hnae3_ae_ops {
 	int (*mac_connect_phy)(struct hnae3_handle *handle);
 	void (*mac_disconnect_phy)(struct hnae3_handle *handle);
 	void (*restore_vlan_table)(struct hnae3_handle *handle);
+	int (*get_vf_config)(struct hnae3_handle *handle, int vf,
+			     struct ifla_vf_info *ivf);
+	int (*set_vf_link_state)(struct hnae3_handle *handle, int vf,
+				 int link_state);
+	int (*set_vf_spoofchk)(struct hnae3_handle *handle, int vf,
+			       bool enable);
+	int (*set_vf_trust)(struct hnae3_handle *handle, int vf, bool enable);
+	int (*set_vf_rate)(struct hnae3_handle *handle, int vf,
+			   int min_tx_rate, int max_tx_rate, bool force);
+	int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
 };
 
 struct hnae3_dcb_ops {
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c (+258 −97)
@@ -681,7 +681,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
 		return 0;
 
 	ret = skb_cow_head(skb, 0);
-	if (unlikely(ret))
+	if (unlikely(ret < 0))
 		return ret;
 
 	l3.hdr = skb_network_header(skb);
@@ -962,14 +962,6 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
 	return 0;
 }
 
-static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
-{
-	/* Config bd buffer end */
-	if (!!frag_end)
-		hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, 1U);
-	hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
-}
-
 static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
 			     struct sk_buff *skb)
 {
@@ -1062,7 +1054,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 		skb_reset_mac_len(skb);
 
 		ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
-		if (unlikely(ret)) {
+		if (unlikely(ret < 0)) {
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.tx_l4_proto_err++;
 			u64_stats_update_end(&ring->syncp);
@@ -1072,7 +1064,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 		ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
 				      &type_cs_vlan_tso,
 				      &ol_type_vlan_len_msec);
-		if (unlikely(ret)) {
+		if (unlikely(ret < 0)) {
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.tx_l2l3l4_err++;
 			u64_stats_update_end(&ring->syncp);
@@ -1081,7 +1073,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 
 		ret = hns3_set_tso(skb, &paylen, &mss,
 				   &type_cs_vlan_tso);
-		if (unlikely(ret)) {
+		if (unlikely(ret < 0)) {
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.tx_tso_err++;
 			u64_stats_update_end(&ring->syncp);
@@ -1102,9 +1094,10 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
 }
 
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
-			  unsigned int size, int frag_end,
-			  enum hns_desc_type type)
+			  unsigned int size, enum hns_desc_type type)
 {
+#define HNS3_LIKELY_BD_NUM	1
+
 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
 	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
 	struct device *dev = ring_to_dev(ring);
@@ -1118,7 +1111,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		int ret;
 
 		ret = hns3_fill_skb_desc(ring, skb, desc);
-		if (unlikely(ret))
+		if (unlikely(ret < 0))
 			return ret;
 
 		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1137,19 +1130,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	desc_cb->length = size;
 
 	if (likely(size <= HNS3_MAX_BD_SIZE)) {
-		u16 bdtp_fe_sc_vld_ra_ri = 0;
-
 		desc_cb->priv = priv;
 		desc_cb->dma = dma;
 		desc_cb->type = type;
 		desc->addr = cpu_to_le64(dma);
 		desc->tx.send_size = cpu_to_le16(size);
-		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
 		desc->tx.bdtp_fe_sc_vld_ra_ri =
-			cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+			cpu_to_le16(BIT(HNS3_TXD_VLD_B));
 
 		ring_ptr_move_fw(ring, next_to_use);
-		return 0;
+		return HNS3_LIKELY_BD_NUM;
 	}
 
 	frag_buf_num = hns3_tx_bd_count(size);
@@ -1158,8 +1148,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 
 	/* When frag size is bigger than hardware limit, split this frag */
 	for (k = 0; k < frag_buf_num; k++) {
-		u16 bdtp_fe_sc_vld_ra_ri = 0;
-
 		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
 		desc_cb->priv = priv;
 		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
@@ -1170,11 +1158,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
 		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
 				     (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
-		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
-				       frag_end && (k == frag_buf_num - 1) ?
-						1 : 0);
 		desc->tx.bdtp_fe_sc_vld_ra_ri =
-				cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+				cpu_to_le16(BIT(HNS3_TXD_VLD_B));
 
 		/* move ring pointer to next */
 		ring_ptr_move_fw(ring, next_to_use);
@@ -1183,23 +1168,78 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		desc = &ring->desc[ring->next_to_use];
 	}
 
-	return 0;
+	return frag_buf_num;
 }
 
-static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
+static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+				    unsigned int bd_num)
 {
-	unsigned int bd_num;
+	unsigned int size;
 	int i;
 
-	/* if the total len is within the max bd limit */
-	if (likely(skb->len <= HNS3_MAX_BD_SIZE))
-		return skb_shinfo(skb)->nr_frags + 1;
-
-	bd_num = hns3_tx_bd_count(skb_headlen(skb));
-
+	size = skb_headlen(skb);
+	while (size > HNS3_MAX_BD_SIZE) {
+		bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+		size -= HNS3_MAX_BD_SIZE;
+
+		if (bd_num > HNS3_MAX_TSO_BD_NUM)
+			return bd_num;
+	}
+
+	if (size) {
+		bd_size[bd_num++] = size;
+		if (bd_num > HNS3_MAX_TSO_BD_NUM)
+			return bd_num;
+	}
+
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		bd_num += hns3_tx_bd_count(skb_frag_size(frag));
+		size = skb_frag_size(frag);
+		if (!size)
+			continue;
+
+		while (size > HNS3_MAX_BD_SIZE) {
+			bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+			size -= HNS3_MAX_BD_SIZE;
+
+			if (bd_num > HNS3_MAX_TSO_BD_NUM)
+				return bd_num;
+		}
+
+		bd_size[bd_num++] = size;
+		if (bd_num > HNS3_MAX_TSO_BD_NUM)
+			return bd_num;
+	}
+
+	return bd_num;
+}
+
+static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size)
+{
+	struct sk_buff *frag_skb;
+	unsigned int bd_num = 0;
+
+	/* If the total len is within the max bd limit */
+	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
+		   skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM))
+		return skb_shinfo(skb)->nr_frags + 1U;
+
+	/* The below case will always be linearized, return
+	 * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized.
+	 */
+	if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
+		     (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)))
+		return HNS3_MAX_TSO_BD_NUM + 1U;
+
+	bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
+
+	if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
+		return bd_num;
+
+	skb_walk_frags(skb, frag_skb) {
+		bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
+		if (bd_num > HNS3_MAX_TSO_BD_NUM)
+			return bd_num;
 	}
 
 	return bd_num;
@@ -1218,26 +1258,26 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
  * 7 frags to to be larger than gso header len + mss, and the remaining
  * continuous 7 frags to be larger than MSS except the last 7 frags.
  */
-static bool hns3_skb_need_linearized(struct sk_buff *skb)
+static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
+				     unsigned int bd_num)
 {
-	int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1;
 	unsigned int tot_len = 0;
 	int i;
 
-	for (i = 0; i < bd_limit; i++)
-		tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+	for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++)
+		tot_len += bd_size[i];
 
-	/* ensure headlen + the first 7 frags is greater than mss + header
-	 * and the first 7 frags is greater than mss.
-	 */
-	if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
-	    hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
+	/* ensure the first 8 frags is greater than mss + header */
+	if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] <
+	    skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
 		return true;
 
-	/* ensure the remaining continuous 7 buffer is greater than mss */
-	for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
-		tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
-		tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
+	/* ensure every continuous 7 buffer is greater than mss
+	 * except the last one.
+	 */
+	for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) {
+		tot_len -= bd_size[i];
+		tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U];
 
 		if (tot_len < skb_shinfo(skb)->gso_size)
 			return true;
@@ -1249,15 +1289,16 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb)
 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 				  struct sk_buff **out_skb)
 {
+	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
 	struct sk_buff *skb = *out_skb;
 	unsigned int bd_num;
 
-	bd_num = hns3_nic_bd_num(skb);
-	if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) {
+	bd_num = hns3_tx_bd_num(skb, bd_size);
+	if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
 		struct sk_buff *new_skb;
 
-		if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
-		    !hns3_skb_need_linearized(skb))
+		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
+		    !hns3_skb_need_linearized(skb, bd_size, bd_num))
 			goto out;
 
 		/* manual split the send packet */
@@ -1267,9 +1308,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 		dev_kfree_skb_any(skb);
 		*out_skb = new_skb;
 
-		bd_num = hns3_nic_bd_num(new_skb);
-		if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) ||
-		    (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL))
+		bd_num = hns3_tx_bd_count(new_skb->len);
+		if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
+		    (!skb_is_gso(new_skb) &&
+		     bd_num > HNS3_MAX_NON_TSO_BD_NUM))
 			return -ENOMEM;
 
 		u64_stats_update_begin(&ring->syncp);
@@ -1314,6 +1356,37 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
 	}
 }
 
+static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
+				 struct sk_buff *skb, enum hns_desc_type type)
+{
+	unsigned int size = skb_headlen(skb);
+	int i, ret, bd_num = 0;
+
+	if (size) {
+		ret = hns3_fill_desc(ring, skb, size, type);
+		if (unlikely(ret < 0))
+			return ret;
+
+		bd_num += ret;
+	}
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		size = skb_frag_size(frag);
+		if (!size)
+			continue;
+
+		ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE);
+		if (unlikely(ret < 0))
+			return ret;
+
+		bd_num += ret;
+	}
+
+	return bd_num;
+}
+
 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -1321,58 +1394,54 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 		&tx_ring_data(priv, skb->queue_mapping);
 	struct hns3_enet_ring *ring = ring_data->ring;
 	struct netdev_queue *dev_queue;
-	skb_frag_t *frag;
-	int next_to_use_head;
-	int buf_num;
-	int seg_num;
-	int size;
+	int pre_ntu, next_to_use_head;
+	struct sk_buff *frag_skb;
+	int bd_num = 0;
 	int ret;
-	int i;
 
 	/* Prefetch the data used later */
 	prefetch(skb->data);
 
-	buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
-	if (unlikely(buf_num <= 0)) {
-		if (buf_num == -EBUSY) {
+	ret = hns3_nic_maybe_stop_tx(ring, &skb);
+	if (unlikely(ret <= 0)) {
+		if (ret == -EBUSY) {
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.tx_busy++;
 			u64_stats_update_end(&ring->syncp);
 			goto out_net_tx_busy;
-		} else if (buf_num == -ENOMEM) {
+		} else if (ret == -ENOMEM) {
 			u64_stats_update_begin(&ring->syncp);
 			ring->stats.sw_err_cnt++;
 			u64_stats_update_end(&ring->syncp);
 		}
 
-		hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);
+		hns3_rl_err(netdev, "xmit error: %d!\n", ret);
 		goto out_err_tx_ok;
 	}
 
-	/* No. of segments (plus a header) */
-	seg_num = skb_shinfo(skb)->nr_frags + 1;
-	/* Fill the first part */
-	size = skb_headlen(skb);
-
 	next_to_use_head = ring->next_to_use;
 
-	ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
-			     DESC_TYPE_SKB);
-	if (unlikely(ret))
+	ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+	if (unlikely(ret < 0))
 		goto fill_err;
 
-	/* Fill the fragments */
-	for (i = 1; i < seg_num; i++) {
-		frag = &skb_shinfo(skb)->frags[i - 1];
-		size = skb_frag_size(frag);
-
-		ret = hns3_fill_desc(ring, frag, size,
-				     seg_num - 1 == i ? 1 : 0,
-				     DESC_TYPE_PAGE);
-
-		if (unlikely(ret))
+	bd_num += ret;
+
+	if (!skb_has_frag_list(skb))
+		goto out;
+
+	skb_walk_frags(skb, frag_skb) {
+		ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE);
+		if (unlikely(ret < 0))
 			goto fill_err;
-	}
+
+		bd_num += ret;
 	}
+out:
+	pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
+					(ring->desc_num - 1);
+	ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
+				cpu_to_le16(BIT(HNS3_TXD_FE_B));
 
 	/* Complete translate all packets */
 	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
@@ -1380,7 +1449,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	wmb(); /* Commit all data before submit */
 
-	hnae3_queue_xmit(ring->tqp, buf_num);
+	hnae3_queue_xmit(ring->tqp, bd_num);
 
 	return NETDEV_TX_OK;
 
@@ -1413,6 +1482,16 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
 		return 0;
 	}
 
+	/* For VF device, if there is a perm_addr, then the user will not
+	 * be allowed to change the address.
+	 */
+	if (!hns3_is_phys_func(h->pdev) &&
+	    !is_zero_ether_addr(netdev->perm_addr)) {
+		netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n",
+			   netdev->perm_addr, mac_addr->sa_data);
+		return -EPERM;
+	}
+
 	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
 	if (ret) {
 		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
@@ -1643,6 +1722,29 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
 	return ret;
 }
 
+static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+	struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+	if (hns3_nic_resetting(netdev))
+		return -EBUSY;
+
+	if (!handle->ae_algo->ops->set_vf_spoofchk)
+		return -EOPNOTSUPP;
+
+	return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
+}
+
+static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
+{
+	struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+	if (!handle->ae_algo->ops->set_vf_trust)
+		return -EOPNOTSUPP;
+
+	return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
+}
+
 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct hnae3_handle *h = hns3_get_handle(netdev);
@@ -1805,6 +1907,57 @@ static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 }
 #endif
 
+static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
+				  struct ifla_vf_info *ivf)
+{
+	struct hnae3_handle *h = hns3_get_handle(ndev);
+
+	if (!h->ae_algo->ops->get_vf_config)
+		return -EOPNOTSUPP;
+
+	return h->ae_algo->ops->get_vf_config(h, vf, ivf);
+}
+
+static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
+				      int link_state)
+{
+	struct hnae3_handle *h = hns3_get_handle(ndev);
+
+	if (!h->ae_algo->ops->set_vf_link_state)
+		return -EOPNOTSUPP;
+
+	return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
+}
+
+static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
+				int min_tx_rate, int max_tx_rate)
+{
+	struct hnae3_handle *h = hns3_get_handle(ndev);
+
+	if (!h->ae_algo->ops->set_vf_rate)
+		return -EOPNOTSUPP;
+
+	return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
+					    false);
+}
+
+static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+	struct hnae3_handle *h = hns3_get_handle(netdev);
+
+	if (!h->ae_algo->ops->set_vf_mac)
+		return -EOPNOTSUPP;
+
+	if (is_multicast_ether_addr(mac)) {
+		netdev_err(netdev,
+			   "Invalid MAC:%pM specified. Could not set MAC\n",
+			   mac);
+		return -EINVAL;
+	}
+
+	return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
+}
+
 static const struct net_device_ops hns3_nic_netdev_ops = {
 	.ndo_open		= hns3_nic_net_open,
 	.ndo_stop		= hns3_nic_net_stop,
@@ -1820,10 +1973,15 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
 	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
 	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
+	.ndo_set_vf_spoofchk	= hns3_set_vf_spoofchk,
+	.ndo_set_vf_trust	= hns3_set_vf_trust,
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= hns3_rx_flow_steer,
 #endif
-
+	.ndo_get_vf_config	= hns3_nic_get_vf_config,
+	.ndo_set_vf_link_state	= hns3_nic_set_vf_link_state,
+	.ndo_set_vf_rate	= hns3_nic_set_vf_rate,
+	.ndo_set_vf_mac		= hns3_nic_set_vf_mac,
 };
 
 bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -2069,9 +2227,8 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
-
-	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+		NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
 
 	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
 
@@ -2081,21 +2238,24 @@ static void hns3_set_default_feature(struct net_device *netdev)
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+		NETIF_F_FRAGLIST;
 
 	netdev->vlan_features |=
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
 		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+		NETIF_F_FRAGLIST;
 
 	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
 		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
 		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
-		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
+		NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+		NETIF_F_FRAGLIST;
 
 	if (pdev->revision >= 0x21) {
 		netdev->hw_features |= NETIF_F_GRO_HW;
@@ -2358,7 +2518,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
 	netdev_tx_completed_queue(dev_queue, pkts, bytes);
 
 	if (unlikely(pkts && netif_carrier_ok(netdev) &&
-		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
+		     ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
 		/* Make sure that anybody stopping the queue after this
 		 * sees the new next_to_clean.
 		 */
@@ -3743,23 +3903,24 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
 }
 
 /* Set mac addr if it is configured. or leave it to the AE driver */
-static int hns3_init_mac_addr(struct net_device *netdev, bool init)
+static int hns3_init_mac_addr(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hnae3_handle *h = priv->ae_handle;
 	u8 mac_addr_temp[ETH_ALEN];
 	int ret = 0;
 
-	if (h->ae_algo->ops->get_mac_addr && init) {
+	if (h->ae_algo->ops->get_mac_addr)
 		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
-		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
-	}
 
 	/* Check if the MAC address is valid, if not get a random one */
-	if (!is_valid_ether_addr(netdev->dev_addr)) {
+	if (!is_valid_ether_addr(mac_addr_temp)) {
 		eth_hw_addr_random(netdev);
 		dev_warn(priv->dev, "using random MAC address %pM\n",
 			 netdev->dev_addr);
+	} else {
+		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
+		ether_addr_copy(netdev->perm_addr, mac_addr_temp);
 	}
 
 	if (h->ae_algo->ops->set_mac_addr)
@@ -3863,7 +4024,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	handle->kinfo.netdev = netdev;
 	handle->priv = (void *)priv;
 
-	hns3_init_mac_addr(netdev, true);
+	hns3_init_mac_addr(netdev);
 
 	hns3_set_default_feature(netdev);
 
@@ -4331,7 +4492,7 @@ static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
 	bool vlan_filter_enable;
 	int ret;
 
-	ret = hns3_init_mac_addr(netdev, false);
+	ret = hns3_init_mac_addr(netdev);
 	if (ret)
 		return ret;
 
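One note for readers tracing the new BD accounting: the splitting rule in hns3_skb_bd_num() is plain arithmetic, and the sketch below restates it as a self-contained userspace C program. Illustrative only — fill_bd_sizes() is a made-up name, not driver code; the macro values match hns3_enet.h below.

/* Standalone sketch of the BD-splitting arithmetic: a buffer larger than
 * the 65535-byte BD limit is split into HNS3_MAX_BD_SIZE chunks, and the
 * count is capped once it exceeds the 63-BD TSO limit (the driver then
 * falls back to linearizing the skb).
 */
#include <stdio.h>

#define HNS3_MAX_BD_SIZE	65535U
#define HNS3_MAX_TSO_BD_NUM	63U

static unsigned int fill_bd_sizes(unsigned int size, unsigned int *bd_size,
				  unsigned int bd_num)
{
	while (size > HNS3_MAX_BD_SIZE) {
		bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
		size -= HNS3_MAX_BD_SIZE;
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;	/* over the limit; caller linearizes */
	}
	if (size)
		bd_size[bd_num++] = size;
	return bd_num;
}

int main(void)
{
	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
	unsigned int i, n;

	/* an arbitrary example: a 150000-byte linear area needs three BDs */
	n = fill_bd_sizes(150000U, bd_size, 0U);
	for (i = 0; i < n; i++)
		printf("bd[%u] = %u\n", i, bd_size[i]);
	return 0;
}

Running it prints bd[0] = 65535, bd[1] = 65535, bd[2] = 18930.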
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h (+8 −4)
@@ -76,7 +76,7 @@ enum hns3_nic_state {
 #define HNS3_RING_NAME_LEN			16
 #define HNS3_BUFFER_SIZE_2048			2048
 #define HNS3_RING_MAX_PENDING			32760
-#define HNS3_RING_MIN_PENDING			24
+#define HNS3_RING_MIN_PENDING			72
 #define HNS3_RING_BD_MULTIPLE			8
 /* max frame size of mac */
 #define HNS3_MAC_MAX_FRAME			9728
@@ -195,9 +195,13 @@ enum hns3_nic_state {
 #define HNS3_VECTOR_INITED			1
 
 #define HNS3_MAX_BD_SIZE			65535
-#define HNS3_MAX_BD_NUM_NORMAL			8
-#define HNS3_MAX_BD_NUM_TSO			63
-#define HNS3_MAX_BD_PER_PKT			MAX_SKB_FRAGS
+#define HNS3_MAX_NON_TSO_BD_NUM			8U
+#define HNS3_MAX_TSO_BD_NUM			63U
+#define HNS3_MAX_TSO_SIZE \
+	(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
+
+#define HNS3_MAX_NON_TSO_SIZE \
+	(HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM)
 
 #define HNS3_VECTOR_GL0_OFFSET			0x100
 #define HNS3_VECTOR_GL1_OFFSET			0x200
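
Worked out, the two derived ceilings introduced here are HNS3_MAX_TSO_SIZE = 65535 * 63 = 4128705 bytes and HNS3_MAX_NON_TSO_SIZE = 65535 * 8 = 524280 bytes; hns3_tx_bd_num() above uses them for its early "always linearize" check before counting BDs.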
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h (+1 −4)
@@ -244,7 +244,7 @@ enum hclge_opcode_type {
 	/* QCN commands */
 	HCLGE_OPC_QCN_MOD_CFG		= 0x1A01,
 	HCLGE_OPC_QCN_GRP_TMPLT_CFG	= 0x1A02,
-	HCLGE_OPC_QCN_SHAPPING_IR_CFG	= 0x1A03,
+	HCLGE_OPC_QCN_SHAPPING_CFG	= 0x1A03,
 	HCLGE_OPC_QCN_SHAPPING_BS_CFG	= 0x1A04,
 	HCLGE_OPC_QCN_QSET_LINK_CFG	= 0x1A05,
 	HCLGE_OPC_QCN_RP_STATUS_GET	= 0x1A06,
@@ -1090,9 +1090,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
 				enum hclge_opcode_type opcode, bool is_read);
 void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
 
-int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
-			       struct hclge_promisc_param *param);
-
 enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
 					   struct hclge_desc *desc);
 enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,