Commit 632d1a48 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'hns3-next'



Huazhong Tan says:

====================
net: hns3: some code optimizations & cleanups & bugfixes

This patch-set includes code optimizations, cleanups and bugfixes for
the HNS3 ethernet controller driver.

[patch 1/12] logs more detail error info for ROCE RAS errors.

[patch 2/12] fixes a wrong size issue for mailbox responding.

[patch 3/12] makes HW GRO handling compliant with the SW one.

[patch 4/12] refactors hns3_get_new_int_gl.

[patch 5/12] adds handling for VF's over_8bd_nfe_err.

[patch 6/12 - 12/12] adds some code optimizations and cleanups, to
make the code more readable and compliant with some static code
analysis tools, these modifications do not change the logic of
the code.

Change log:
V1->V2: fixes comments from David Miller.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 900d96e4 9b2f3477
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -69,7 +69,7 @@ enum hclge_mbx_vlan_cfg_subcode {
};
};


#define HCLGE_MBX_MAX_MSG_SIZE	16
#define HCLGE_MBX_MAX_MSG_SIZE	16
#define HCLGE_MBX_MAX_RESP_DATA_SIZE	16
#define HCLGE_MBX_MAX_RESP_DATA_SIZE	8
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM	3
#define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM	3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM	3
#define HCLGE_MBX_RING_NODE_VARIABLE_NUM	3


+5 −16
Original line number Original line Diff line number Diff line
@@ -16,14 +16,10 @@ static LIST_HEAD(hnae3_ae_dev_list);
 */
 */
static DEFINE_MUTEX(hnae3_common_lock);
static DEFINE_MUTEX(hnae3_common_lock);


static bool hnae3_client_match(enum hnae3_client_type client_type,
static bool hnae3_client_match(enum hnae3_client_type client_type)
			       enum hnae3_dev_type dev_type)
{
{
	if ((dev_type == HNAE3_DEV_KNIC) && (client_type == HNAE3_CLIENT_KNIC ||
	if (client_type == HNAE3_CLIENT_KNIC ||
					     client_type == HNAE3_CLIENT_ROCE))
	    client_type == HNAE3_CLIENT_ROCE)
		return true;

	if (dev_type == HNAE3_DEV_UNIC && client_type == HNAE3_CLIENT_UNIC)
		return true;
		return true;


	return false;
	return false;
@@ -39,9 +35,6 @@ void hnae3_set_client_init_flag(struct hnae3_client *client,
	case HNAE3_CLIENT_KNIC:
	case HNAE3_CLIENT_KNIC:
		hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
		hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
		break;
		break;
	case HNAE3_CLIENT_UNIC:
		hnae3_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited);
		break;
	case HNAE3_CLIENT_ROCE:
	case HNAE3_CLIENT_ROCE:
		hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
		hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
		break;
		break;
@@ -61,10 +54,6 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
		inited = hnae3_get_bit(ae_dev->flag,
		inited = hnae3_get_bit(ae_dev->flag,
				       HNAE3_KNIC_CLIENT_INITED_B);
				       HNAE3_KNIC_CLIENT_INITED_B);
		break;
		break;
	case HNAE3_CLIENT_UNIC:
		inited = hnae3_get_bit(ae_dev->flag,
				       HNAE3_UNIC_CLIENT_INITED_B);
		break;
	case HNAE3_CLIENT_ROCE:
	case HNAE3_CLIENT_ROCE:
		inited = hnae3_get_bit(ae_dev->flag,
		inited = hnae3_get_bit(ae_dev->flag,
				       HNAE3_ROCE_CLIENT_INITED_B);
				       HNAE3_ROCE_CLIENT_INITED_B);
@@ -82,7 +71,7 @@ static int hnae3_init_client_instance(struct hnae3_client *client,
	int ret;
	int ret;


	/* check if this client matches the type of ae_dev */
	/* check if this client matches the type of ae_dev */
	if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
	if (!(hnae3_client_match(client->type) &&
	      hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
	      hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
		return 0;
		return 0;
	}
	}
@@ -99,7 +88,7 @@ static void hnae3_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
					 struct hnae3_ae_dev *ae_dev)
{
{
	/* check if this client matches the type of ae_dev */
	/* check if this client matches the type of ae_dev */
	if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
	if (!(hnae3_client_match(client->type) &&
	      hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
	      hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
		return;
		return;


+0 −7
Original line number Original line Diff line number Diff line
@@ -102,15 +102,9 @@ enum hnae3_loop {


enum hnae3_client_type {
enum hnae3_client_type {
	HNAE3_CLIENT_KNIC,
	HNAE3_CLIENT_KNIC,
	HNAE3_CLIENT_UNIC,
	HNAE3_CLIENT_ROCE,
	HNAE3_CLIENT_ROCE,
};
};


enum hnae3_dev_type {
	HNAE3_DEV_KNIC,
	HNAE3_DEV_UNIC,
};

/* mac media type */
/* mac media type */
enum hnae3_media_type {
enum hnae3_media_type {
	HNAE3_MEDIA_TYPE_UNKNOWN,
	HNAE3_MEDIA_TYPE_UNKNOWN,
@@ -220,7 +214,6 @@ struct hnae3_ae_dev {
	struct list_head node;
	struct list_head node;
	u32 flag;
	u32 flag;
	u8 override_pci_need_reset; /* fix to stop multiple reset happening */
	u8 override_pci_need_reset; /* fix to stop multiple reset happening */
	enum hnae3_dev_type dev_type;
	enum hnae3_reset_type reset_type;
	enum hnae3_reset_type reset_type;
	void *priv;
	void *priv;
};
};
+4 −8
Original line number Original line Diff line number Diff line
@@ -4,8 +4,7 @@
#include "hnae3.h"
#include "hnae3.h"
#include "hns3_enet.h"
#include "hns3_enet.h"


static
static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
{
	struct hnae3_handle *h = hns3_get_handle(ndev);
	struct hnae3_handle *h = hns3_get_handle(ndev);


@@ -18,8 +17,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
	return -EOPNOTSUPP;
	return -EOPNOTSUPP;
}
}


static
static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
{
	struct hnae3_handle *h = hns3_get_handle(ndev);
	struct hnae3_handle *h = hns3_get_handle(ndev);


@@ -32,8 +30,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
	return -EOPNOTSUPP;
	return -EOPNOTSUPP;
}
}


static
static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
{
	struct hnae3_handle *h = hns3_get_handle(ndev);
	struct hnae3_handle *h = hns3_get_handle(ndev);


@@ -46,8 +43,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
	return -EOPNOTSUPP;
	return -EOPNOTSUPP;
}
}


static
static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
{
	struct hnae3_handle *h = hns3_get_handle(ndev);
	struct hnae3_handle *h = hns3_get_handle(ndev);


+108 −91
Original line number Original line Diff line number Diff line
@@ -17,6 +17,7 @@
#include <linux/sctp.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/gre.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/vxlan.h>
@@ -138,8 +139,7 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';


		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name,
				  tqp_vectors->name, tqp_vectors);
				       tqp_vectors);
		if (ret) {
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
				   tqp_vectors->vector_irq);
@@ -276,8 +276,7 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
	if (ret) {
		netdev_err(netdev,
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
			   ret);
		return ret;
		return ret;
	}
	}


@@ -372,7 +371,7 @@ static int hns3_nic_net_up(struct net_device *netdev)
	/* get irq resource for all vectors */
	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	ret = hns3_nic_init_irq(priv);
	if (ret) {
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		netdev_err(netdev, "init irq failed! ret=%d\n", ret);
		goto free_rmap;
		goto free_rmap;
	}
	}


@@ -448,16 +447,13 @@ static int hns3_nic_net_open(struct net_device *netdev)


	ret = hns3_nic_net_up(netdev);
	ret = hns3_nic_net_up(netdev);
	if (ret) {
	if (ret) {
		netdev_err(netdev,
		netdev_err(netdev, "net up fail, ret=%d!\n", ret);
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
		return ret;
	}
	}


	kinfo = &h->kinfo;
	kinfo = &h->kinfo;
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		netdev_set_prio_tc_map(netdev, i,
		netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
				       kinfo->prio_tc[i]);
	}


	if (h->ae_algo->ops->set_timer_task)
	if (h->ae_algo->ops->set_timer_task)
		h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
		h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
@@ -662,7 +658,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
	if (l3.v4->version == 4)
	if (l3.v4->version == 4)
		l3.v4->check = 0;
		l3.v4->check = 0;


	/* tunnel packet.*/
	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL |
@@ -799,7 +795,7 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);


	il2_hdr = skb_inner_mac_header(skb);
	il2_hdr = skb_inner_mac_header(skb);
	/* compute OL4 header size, defined in 4 Bytes. */
	/* compute OL4 header size, defined in 4 Bytes */
	l4_len = il2_hdr - l4.hdr;
	l4_len = il2_hdr - l4.hdr;
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
	hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);


@@ -1059,8 +1055,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
		/* Set txbd */
		/* Set txbd */
		desc->tx.ol_type_vlan_len_msec =
		desc->tx.ol_type_vlan_len_msec =
			cpu_to_le32(ol_type_vlan_len_msec);
			cpu_to_le32(ol_type_vlan_len_msec);
		desc->tx.type_cs_vlan_tso_len =
		desc->tx.type_cs_vlan_tso_len =	cpu_to_le32(type_cs_vlan_tso);
			cpu_to_le32(type_cs_vlan_tso);
		desc->tx.paylen = cpu_to_le32(paylen);
		desc->tx.paylen = cpu_to_le32(paylen);
		desc->tx.mss = cpu_to_le16(mss);
		desc->tx.mss = cpu_to_le16(mss);
		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
@@ -1119,7 +1114,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
		desc->tx.bdtp_fe_sc_vld_ra_ri =
		desc->tx.bdtp_fe_sc_vld_ra_ri =
				cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
				cpu_to_le16(bdtp_fe_sc_vld_ra_ri);


		/* move ring pointer to next.*/
		/* move ring pointer to next */
		ring_ptr_move_fw(ring, next_to_use);
		ring_ptr_move_fw(ring, next_to_use);


		desc_cb = &ring->desc_cb[ring->next_to_use];
		desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -1827,8 +1822,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct hnae3_ae_dev *ae_dev;
	struct hnae3_ae_dev *ae_dev;
	int ret;
	int ret;


	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
			      GFP_KERNEL);
	if (!ae_dev) {
	if (!ae_dev) {
		ret = -ENOMEM;
		ret = -ENOMEM;
		return ret;
		return ret;
@@ -1836,7 +1830,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)


	ae_dev->pdev = pdev;
	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	ae_dev->flag = ent->driver_data;
	ae_dev->dev_type = HNAE3_DEV_KNIC;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	ae_dev->reset_type = HNAE3_NONE_RESET;
	hns3_get_dev_capability(pdev, ae_dev);
	hns3_get_dev_capability(pdev, ae_dev);
	pci_set_drvdata(pdev, ae_dev);
	pci_set_drvdata(pdev, ae_dev);
@@ -2209,8 +2202,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
		+ ring->desc_cb[i].page_offset);
					 ring->desc_cb[i].page_offset);
	ring->desc[i].rx.bd_base_info = 0;
	ring->desc[i].rx.bd_base_info = 0;
}
}


@@ -2312,8 +2305,8 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
}


static void
static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
				      int cleand_count)
{
{
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc_cb res_cbs;
	struct hns3_desc_cb res_cbs;
@@ -2366,7 +2359,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
	/* Avoid re-using remote pages, or the stack is still using the page
	/* Avoid re-using remote pages, or the stack is still using the page
	 * when page_offset rollback to zero, flag default unreuse
	 * when page_offset rollback to zero, flag default unreuse
	 */
	 */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) ||
	if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
	    (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
	    (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
		return;
		return;


@@ -2384,13 +2377,13 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
	}
	}
}
}


static int hns3_gro_complete(struct sk_buff *skb)
static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
{
{
	__be16 type = skb->protocol;
	__be16 type = skb->protocol;
	struct tcphdr *th;
	struct tcphdr *th;
	int depth = 0;
	int depth = 0;


	while (type == htons(ETH_P_8021Q)) {
	while (eth_type_vlan(type)) {
		struct vlan_hdr *vh;
		struct vlan_hdr *vh;


		if ((depth + VLAN_HLEN) > skb_headlen(skb))
		if ((depth + VLAN_HLEN) > skb_headlen(skb))
@@ -2401,10 +2394,24 @@ static int hns3_gro_complete(struct sk_buff *skb)
		depth += VLAN_HLEN;
		depth += VLAN_HLEN;
	}
	}


	skb_set_network_header(skb, depth);

	if (type == htons(ETH_P_IP)) {
	if (type == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		depth += sizeof(struct iphdr);
		depth += sizeof(struct iphdr);
		skb_set_transport_header(skb, depth);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
					  iph->daddr, 0);
	} else if (type == htons(ETH_P_IPV6)) {
	} else if (type == htons(ETH_P_IPV6)) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		depth += sizeof(struct ipv6hdr);
		depth += sizeof(struct ipv6hdr);
		skb_set_transport_header(skb, depth);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
					  &iph->daddr, 0);
	} else {
	} else {
		netdev_err(skb->dev,
		netdev_err(skb->dev,
			   "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
			   "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
@@ -2412,13 +2419,16 @@ static int hns3_gro_complete(struct sk_buff *skb)
		return -EFAULT;
		return -EFAULT;
	}
	}


	th = (struct tcphdr *)(skb->data + depth);
	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	if (th->cwr)
	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;


	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;


	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
	return 0;
}
}


@@ -2565,7 +2575,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));


		/* We can reuse buffer as-is, just make sure it is local */
		/* We can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
		if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
			desc_cb->reuse_flag = 1;
			desc_cb->reuse_flag = 1;
		else /* This page cannot be reused so discard it */
		else /* This page cannot be reused so discard it */
			put_page(desc_cb->priv);
			put_page(desc_cb->priv);
@@ -2656,21 +2666,22 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
				     struct sk_buff *skb, u32 l234info,
				     struct sk_buff *skb, u32 l234info,
				     u32 bd_base_info, u32 ol_info)
				     u32 bd_base_info, u32 ol_info)
{
{
	u16 gro_count;
	u32 l3_type;
	u32 l3_type;


	gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
				    HNS3_RXD_GRO_COUNT_S);
						    HNS3_RXD_GRO_SIZE_M,
						    HNS3_RXD_GRO_SIZE_S);
	/* if there is no HW GRO, do not set gro params */
	/* if there is no HW GRO, do not set gro params */
	if (!gro_count) {
	if (!skb_shinfo(skb)->gso_size) {
		hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
		hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
		return 0;
		return 0;
	}
	}


	NAPI_GRO_CB(skb)->count = gro_count;
	NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
						  HNS3_RXD_GRO_COUNT_M,
						  HNS3_RXD_GRO_COUNT_S);


	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
	l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
				  HNS3_RXD_L3ID_S);
	if (l3_type == HNS3_L3_TYPE_IPV4)
	if (l3_type == HNS3_L3_TYPE_IPV4)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	else if (l3_type == HNS3_L3_TYPE_IPV6)
	else if (l3_type == HNS3_L3_TYPE_IPV6)
@@ -2678,11 +2689,7 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
	else
	else
		return -EFAULT;
		return -EFAULT;


	skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
	return  hns3_gro_complete(skb, l234info);
						    HNS3_RXD_GRO_SIZE_M,
						    HNS3_RXD_GRO_SIZE_S);

	return  hns3_gro_complete(skb);
}
}


static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
@@ -2862,8 +2869,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
	return 0;
	return 0;
}
}


int hns3_clean_rx_ring(
int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
		struct hns3_enet_ring *ring, int budget,
		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
		       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
@@ -2916,42 +2922,25 @@ int hns3_clean_rx_ring(
out:
out:
	/* Make all data has been write before submit */
	/* Make all data has been write before submit */
	if (clean_count + unused_count > 0)
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
		hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
					  clean_count + unused_count);


	return recv_pkts;
	return recv_pkts;
}
}


static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
{
{
	struct hns3_enet_tqp_vector *tqp_vector =
#define HNS3_RX_LOW_BYTE_RATE 10000
					ring_group->ring->tqp_vector;
#define HNS3_RX_MID_BYTE_RATE 20000
#define HNS3_RX_ULTRA_PACKET_RATE 40

	enum hns3_flow_level_range new_flow_level;
	enum hns3_flow_level_range new_flow_level;
	int packets_per_msecs;
	struct hns3_enet_tqp_vector *tqp_vector;
	int bytes_per_msecs;
	int packets_per_msecs, bytes_per_msecs;
	u32 time_passed_ms;
	u32 time_passed_ms;
	u16 new_int_gl;

	if (!tqp_vector->last_jiffies)
		return false;


	if (ring_group->total_packets == 0) {
	tqp_vector = ring_group->ring->tqp_vector;
		ring_group->coal.int_gl = HNS3_INT_GL_50K;
		ring_group->coal.flow_level = HNS3_FLOW_LOW;
		return true;
	}

	/* Simple throttlerate management
	 * 0-10MB/s   lower     (50000 ints/s)
	 * 10-20MB/s   middle    (20000 ints/s)
	 * 20-1249MB/s high      (18000 ints/s)
	 * > 40000pps  ultra     (8000 ints/s)
	 */
	new_flow_level = ring_group->coal.flow_level;
	new_int_gl = ring_group->coal.int_gl;
	time_passed_ms =
	time_passed_ms =
		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);

	if (!time_passed_ms)
	if (!time_passed_ms)
		return false;
		return false;


@@ -2961,9 +2950,14 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
	do_div(ring_group->total_bytes, time_passed_ms);
	do_div(ring_group->total_bytes, time_passed_ms);
	bytes_per_msecs = ring_group->total_bytes;
	bytes_per_msecs = ring_group->total_bytes;


#define HNS3_RX_LOW_BYTE_RATE 10000
	new_flow_level = ring_group->coal.flow_level;
#define HNS3_RX_MID_BYTE_RATE 20000


	/* Simple throttlerate management
	 * 0-10MB/s   lower     (50000 ints/s)
	 * 10-20MB/s   middle    (20000 ints/s)
	 * 20-1249MB/s high      (18000 ints/s)
	 * > 40000pps  ultra     (8000 ints/s)
	 */
	switch (new_flow_level) {
	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
	case HNS3_FLOW_LOW:
		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
@@ -2983,13 +2977,40 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
		break;
		break;
	}
	}


#define HNS3_RX_ULTRA_PACKET_RATE 40

	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
	    &tqp_vector->rx_group == ring_group)
	    &tqp_vector->rx_group == ring_group)
		new_flow_level = HNS3_FLOW_ULTRA;
		new_flow_level = HNS3_FLOW_ULTRA;


	switch (new_flow_level) {
	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->coal.flow_level = new_flow_level;

	return true;
}

static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
	struct hns3_enet_tqp_vector *tqp_vector;
	u16 new_int_gl;

	if (!ring_group->ring)
		return false;

	tqp_vector = ring_group->ring->tqp_vector;
	if (!tqp_vector->last_jiffies)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->coal.int_gl = HNS3_INT_GL_50K;
		ring_group->coal.flow_level = HNS3_FLOW_LOW;
		return true;
	}

	if (!hns3_get_new_flow_lvl(ring_group))
		return false;

	new_int_gl = ring_group->coal.int_gl;
	switch (ring_group->coal.flow_level) {
	case HNS3_FLOW_LOW:
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		new_int_gl = HNS3_INT_GL_50K;
		break;
		break;
@@ -3006,9 +3027,6 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
		break;
		break;
	}
	}


	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->coal.flow_level = new_flow_level;
	if (new_int_gl != ring_group->coal.int_gl) {
	if (new_int_gl != ring_group->coal.int_gl) {
		ring_group->coal.int_gl = new_int_gl;
		ring_group->coal.int_gl = new_int_gl;
		return true;
		return true;
@@ -3309,6 +3327,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
	if (!vector)
	if (!vector)
		return -ENOMEM;
		return -ENOMEM;


	/* save the actual available vector number */
	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);


	priv->vector_num = vector_num;
	priv->vector_num = vector_num;
@@ -3577,8 +3596,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
	struct hnae3_queue *q = ring->tqp;
	struct hnae3_queue *q = ring->tqp;


	if (!HNAE3_IS_TX_RING(ring)) {
	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));
			       (u32)((dma >> 31) >> 1));


@@ -4024,8 +4042,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
					    ret);
					    ret);
				return ret;
				return ret;
			}
			}
			hns3_replace_buffer(ring, ring->next_to_use,
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
					    &res_cbs);
		}
		}
		ring_ptr_move_fw(ring, next_to_use);
		ring_ptr_move_fw(ring, next_to_use);
	}
	}
@@ -4200,7 +4217,7 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
		if (ret) {
		if (ret) {
			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
			set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
			netdev_err(kinfo->netdev,
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
				   "net up fail, ret=%d!\n", ret);
			return ret;
			return ret;
		}
		}
	}
	}
Loading