Commit 11c6c0c2 authored by David S. Miller
Browse files


Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2018-11-20

This series contains updates to the ice driver only.

Akeem updates the driver to determine whether or not to do
auto-negotiation based on the VSI state.

Bruce cleans up the control queue code to remove duplicate code.  Take
advantage of some compiler optimizations by making some structures
constant, and also note that they cannot be modified.  Cleaned up
formatting issues and code comments that needed clarification.  Fixed a
potential NULL pointer dereference by adding a check.

Jaroslaw adds a check to verify if memory was allocated or not.

Yashaswini Raghuram fixes the driver to ensure we are not enabling the
LAN_EN flag if the MAC in the MAC-VLAN is a unicast MAC, so that the
unicast packets are not forwarded to the wire.

Dave fixes the return value of ice_napi_poll() to be more useful in
returning the work that was done and should only return 0 when no work
was done.

Anirudh cleans up code comments to make them more consistent.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 51428fd6 f25dad19
Loading
Loading
Loading
Loading
+15 −15
Original line number Diff line number Diff line
@@ -96,14 +96,14 @@ extern const char ice_drv_ver[];
#define ice_for_each_vsi(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)

/* Macros for each tx/rx ring in a VSI */
/* Macros for each Tx/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)

#define ice_for_each_rxq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)

/* Macros for each allocated tx/rx ring whether used or not in a VSI */
/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)

@@ -183,8 +183,8 @@ struct ice_vsi {
	struct ice_sw *vsw;		 /* switch this VSI is on */
	struct ice_pf *back;		 /* back pointer to PF */
	struct ice_port_info *port_info; /* back pointer to port_info */
	struct ice_ring **rx_rings;	 /* rx ring array */
	struct ice_ring **tx_rings;	 /* tx ring array */
	struct ice_ring **rx_rings;	 /* Rx ring array */
	struct ice_ring **tx_rings;	 /* Tx ring array */
	struct ice_q_vector **q_vectors; /* q_vector array */

	irqreturn_t (*irq_handler)(int irq, void *data);
@@ -255,8 +255,8 @@ struct ice_q_vector {
	struct ice_ring_container tx;
	struct irq_affinity_notify affinity_notify;
	u16 v_idx;			/* index in the vsi->q_vector array. */
	u8 num_ring_tx;			/* total number of tx rings in vector */
	u8 num_ring_rx;			/* total number of rx rings in vector */
	u8 num_ring_tx;			/* total number of Tx rings in vector */
	u8 num_ring_rx;			/* total number of Rx rings in vector */
	char name[ICE_INT_NAME_STR_LEN];
	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
	 * value to the device
@@ -308,10 +308,10 @@ struct ice_pf {
	u32 hw_oicr_idx;	/* Other interrupt cause vector HW index */
	u32 num_avail_hw_msix;	/* remaining HW MSIX vectors left unclaimed */
	u32 num_lan_msix;	/* Total MSIX vectors for base driver */
	u16 num_lan_tx;		/* num lan tx queues setup */
	u16 num_lan_rx;		/* num lan rx queues setup */
	u16 q_left_tx;		/* remaining num tx queues left unclaimed */
	u16 q_left_rx;		/* remaining num rx queues left unclaimed */
	u16 num_lan_tx;		/* num lan Tx queues setup */
	u16 num_lan_rx;		/* num lan Rx queues setup */
	u16 q_left_tx;		/* remaining num Tx queues left unclaimed */
	u16 q_left_rx;		/* remaining num Rx queues left unclaimed */
	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
	u16 num_alloc_vsi;
	u16 corer_count;	/* Core reset count */
+8 −8
Original line number Diff line number Diff line
@@ -1380,10 +1380,10 @@ struct ice_aq_desc {

/* error codes */
enum ice_aq_err {
	ICE_AQ_RC_OK		= 0,  /* success */
	ICE_AQ_RC_OK		= 0,  /* Success */
	ICE_AQ_RC_ENOMEM	= 9,  /* Out of memory */
	ICE_AQ_RC_EBUSY		= 12, /* Device or resource busy */
	ICE_AQ_RC_EEXIST	= 13, /* object already exists */
	ICE_AQ_RC_EEXIST	= 13, /* Object already exists */
	ICE_AQ_RC_ENOSPC	= 16, /* No space left or allocation failure */
};

+5 −8
Original line number Diff line number Diff line
@@ -405,9 +405,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	ice_init_def_sw_recp(hw);

	return 0;
	return ice_init_def_sw_recp(hw);
}

/**
@@ -715,7 +713,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)

	hw->evb_veb = true;

	/* Query the allocated resources for tx scheduler */
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
@@ -958,7 +956,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to hw register space
 */
@@ -1014,7 +1012,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the rx queue
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to hw register space
@@ -1715,8 +1713,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
{
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

+78 −145
Original line number Diff line number Diff line
@@ -3,6 +3,26 @@

#include "ice_common.h"

/* ICE_CQ_INIT_REGS - fill in a control queue's register offsets and masks
 * @qinfo: pointer to the struct ice_ctl_q_info to initialize
 * @prefix: register-name prefix (e.g. PF_FW, PF_MBX) token-pasted onto the
 *          ATQ (send queue) and ARQ (receive queue) register/mask macro names
 *
 * Replaces the duplicated per-queue assignment lists in the AdminQ and
 * mailbox init paths; sq maps to the ATQ registers, rq to the ARQ registers.
 */
#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
@@ -13,23 +33,7 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	cq->sq.head = PF_FW_ATQH;
	cq->sq.tail = PF_FW_ATQT;
	cq->sq.len = PF_FW_ATQLEN;
	cq->sq.bah = PF_FW_ATQBAH;
	cq->sq.bal = PF_FW_ATQBAL;
	cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
	cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
	cq->sq.head_mask = PF_FW_ATQH_ATQH_M;

	cq->rq.head = PF_FW_ARQH;
	cq->rq.tail = PF_FW_ARQT;
	cq->rq.len = PF_FW_ARQLEN;
	cq->rq.bah = PF_FW_ARQBAH;
	cq->rq.bal = PF_FW_ARQBAL;
	cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
	cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
	cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
@@ -42,24 +46,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	/* set head and tail registers in our local struct */
	cq->sq.head = PF_MBX_ATQH;
	cq->sq.tail = PF_MBX_ATQT;
	cq->sq.len = PF_MBX_ATQLEN;
	cq->sq.bah = PF_MBX_ATQBAH;
	cq->sq.bal = PF_MBX_ATQBAL;
	cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
	cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
	cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;

	cq->rq.head = PF_MBX_ARQH;
	cq->rq.tail = PF_MBX_ARQT;
	cq->rq.len = PF_MBX_ARQLEN;
	cq->rq.bah = PF_MBX_ARQBAH;
	cq->rq.bal = PF_MBX_ARQBAL;
	cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
	cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
	cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
@@ -131,37 +118,20 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}

/**
 * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted send buffers have already been cleaned
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
			   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
	cq->sq.desc_buf.va = NULL;
	cq->sq.desc_buf.pa = 0;
	cq->sq.desc_buf.size = 0;
}

/**
 * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
			   cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
	cq->rq.desc_buf.va = NULL;
	cq->rq.desc_buf.pa = 0;
	cq->rq.desc_buf.size = 0;
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
@@ -280,54 +250,23 @@ unwind_alloc_sq_bufs:
	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_free_rq_bufs - Free ARQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* free descriptors */
	for (i = 0; i < cq->num_rq_entries; i++) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}

	/* free the dma header */
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
}

/**
 * ice_free_sq_bufs - Free ATQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	int i;
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* only unmap if the address is non-NULL */
	for (i = 0; i < cq->num_sq_entries; i++)
		if (cq->sq.r.sq_bi[i].pa) {
			dmam_free_coherent(ice_hw_to_dev(hw),
					   cq->sq.r.sq_bi[i].size,
					   cq->sq.r.sq_bi[i].va,
					   cq->sq.r.sq_bi[i].pa);
			cq->sq.r.sq_bi[i].va = NULL;
			cq->sq.r.sq_bi[i].pa = 0;
			cq->sq.r.sq_bi[i].size = 0;
		}
	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* free the buffer info list */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);
	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	/* free the dma header */
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	return 0;
}

/**
@@ -340,23 +279,7 @@ static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);

	/* set starting point */
	wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
	wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
	wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->sq.bal);
	if (reg != lower_32_bits(cq->sq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
@@ -369,25 +292,15 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	enum ice_status status;

	/* set starting point */
	wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
	wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
	wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));
	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, cq->rq.bal);
	if (reg != lower_32_bits(cq->rq.desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

@@ -444,7 +357,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_sq_ring(hw, cq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
@@ -503,12 +416,33 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_ctrlq_rq_ring(hw, cq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/* ICE_FREE_CQ_BUFS - free a control queue's posted buffers and tracking data
 * @hw: pointer to the hardware structure
 * @qi: pointer to the struct ice_ctl_q_info owning the queue
 * @ring: queue token (sq or rq) token-pasted to select the queue's fields
 *
 * Frees each DMA-coherent descriptor buffer whose physical address is
 * non-zero, then releases the command buffer info list (if allocated) and
 * the DMA header tracking array via the managed-device allocator.
 */
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	int i;								\
	/* free descriptors */						\
	for (i = 0; i < (qi)->num_##ring##_entries; i++)		\
		if ((qi)->ring.r.ring##_bi[i].pa) {			\
			dmam_free_coherent(ice_hw_to_dev(hw),		\
					   (qi)->ring.r.ring##_bi[i].size,\
					   (qi)->ring.r.ring##_bi[i].va,\
					   (qi)->ring.r.ring##_bi[i].pa);\
			(qi)->ring.r.ring##_bi[i].va = NULL;		\
			(qi)->ring.r.ring##_bi[i].pa = 0;		\
			(qi)->ring.r.ring##_bi[i].size = 0;		\
		}							\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
	/* free dma head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
@@ -538,8 +472,8 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ice_free_sq_bufs(hw, cq);
	ice_free_ctrlq_sq_ring(hw, cq);
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
@@ -606,8 +540,8 @@ ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ice_free_rq_bufs(hw, cq);
	ice_free_ctrlq_rq_ring(hw, cq);
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
@@ -657,7 +591,6 @@ init_ctrlq_free_rq:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
@@ -841,7 +774,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ.  It runs the q,
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
+13 −13
Original line number Diff line number Diff line
@@ -62,7 +62,7 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev.
 */
static struct ice_stats ice_gstrings_pf_stats[] = {
static const struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
	ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
	ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
@@ -104,7 +104,7 @@ static struct ice_stats ice_gstrings_pf_stats[] = {
	ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
};

static u32 ice_regs_dump_list[] = {
static const u32 ice_regs_dump_list[] = {
	PFGEN_STATE,
	PRTGEN_STATUS,
	QRX_CTRL(0),
@@ -1392,17 +1392,17 @@ static int ice_nway_reset(struct net_device *netdev)
{
	/* restart autonegotiation */
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	struct ice_port_info *pi;
	enum ice_status status;
	bool link_up;

	pi = vsi->port_info;
	hw_link_info = &pi->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
	/* If VSI state is up, then restart autoneg with link up */
	if (!test_bit(__ICE_DOWN, vsi->back->state))
		status = ice_aq_set_link_restart_an(pi, true, NULL);
	else
		status = ice_aq_set_link_restart_an(pi, false, NULL);

	status = ice_aq_set_link_restart_an(pi, link_up, NULL);
	if (status) {
		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
			    status, pi->hw->adminq.sq_last_status);
@@ -1441,7 +1441,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
/**
 * ice_set_pauseparam - Set Flow Control parameter
 * @netdev: network interface device structure
 * @pause: return tx/rx flow control status
 * @pause: return Tx/Rx flow control status
 */
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
@@ -1543,7 +1543,7 @@ static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
}

/**
 * ice_get_rxfh_indir_size - get the rx flow hash indirection table size
 * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
@@ -1556,7 +1556,7 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
}

/**
 * ice_get_rxfh - get the rx flow hash indirection table
 * ice_get_rxfh - get the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
@@ -1603,7 +1603,7 @@ out:
}

/**
 * ice_set_rxfh - set the rx flow hash indirection table
 * ice_set_rxfh - set the Rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
Loading