Commit 0b6591e6 authored by Jesse Brandeburg's avatar Jesse Brandeburg Committed by Jeff Kirsher
Browse files

iavf: rename i40e_status to iavf_status



This is just a rename of an internal type i40e_status, but
it was a pretty big change and so deserved its own patch.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 129cf89e
Loading
Loading
Loading
Loading
+40 −41
Original line number Diff line number Diff line
@@ -34,9 +34,9 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
static iavf_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;
	iavf_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
@@ -61,9 +61,9 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
static iavf_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;
	iavf_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
@@ -102,11 +102,11 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
static iavf_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	iavf_status ret_code;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
@@ -115,7 +115,8 @@ static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
					  (hw->aq.num_arq_entries *
					   sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
@@ -169,15 +170,16 @@ unwind_alloc_arq_bufs:
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
static iavf_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_dma_mem *bi;
	iavf_status ret_code;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
					  (hw->aq.num_asq_entries *
					   sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
@@ -253,9 +255,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
 *
 *  Configure base address and length registers for the transmit queue
 **/
static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
static iavf_status i40e_config_asq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
@@ -282,9 +284,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
 *
 * Configure base address and length registers for the receive (event queue)
 **/
static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
static iavf_status i40e_config_arq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
@@ -321,9 +323,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
static iavf_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	iavf_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
@@ -380,9 +382,9 @@ init_adminq_exit:
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
static iavf_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	iavf_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
@@ -432,9 +434,9 @@ init_adminq_exit:
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
static iavf_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	iavf_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

@@ -466,9 +468,9 @@ shutdown_asq_out:
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
static iavf_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	iavf_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

@@ -505,9 +507,9 @@ shutdown_arq_out:
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
i40e_status iavf_init_adminq(struct i40e_hw *hw)
iavf_status iavf_init_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code;
	iavf_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
@@ -549,9 +551,9 @@ init_adminq_exit:
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
i40e_status iavf_shutdown_adminq(struct i40e_hw *hw)
iavf_status iavf_shutdown_adminq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	iavf_status ret_code = 0;

	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);
@@ -570,7 +572,7 @@ i40e_status iavf_shutdown_adminq(struct i40e_hw *hw)
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_adminq_ring *asq = &hw->aq.asq;
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
@@ -616,7 +618,6 @@ bool iavf_asq_done(struct i40e_hw *hw)
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;

}

/**
@@ -630,13 +631,13 @@ bool iavf_asq_done(struct i40e_hw *hw)
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
i40e_status iavf_asq_send_command(struct i40e_hw *hw,
iavf_status iavf_asq_send_command(struct i40e_hw *hw,
				  struct i40e_aq_desc *desc,
				  void *buff, /* can be NULL */
				  u16  buff_size,
				  struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	iavf_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
@@ -724,8 +725,8 @@ i40e_status iavf_asq_send_command(struct i40e_hw *hw,
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
	if (buff) {
		dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);
@@ -769,7 +770,7 @@ i40e_status iavf_asq_send_command(struct i40e_hw *hw,
	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
		if (buff)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
@@ -793,8 +794,7 @@ i40e_status iavf_asq_send_command(struct i40e_hw *hw,

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
		      buff_size);
	iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
@@ -826,8 +826,7 @@ asq_send_command_error:
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
@@ -845,13 +844,13 @@ void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
i40e_status iavf_clean_arq_element(struct i40e_hw *hw,
iavf_status iavf_clean_arq_element(struct i40e_hw *hw,
				   struct i40e_arq_event_info *e,
				   u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	iavf_status ret_code = 0;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
@@ -897,7 +896,7 @@ i40e_status iavf_clean_arq_element(struct i40e_hw *hw,
	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
	if (e->msg_buf && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

@@ -930,7 +929,7 @@ i40e_status iavf_clean_arq_element(struct i40e_hw *hw,

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
+7 −11
Original line number Diff line number Diff line
@@ -20,16 +20,12 @@ enum i40e_memory_type {
};

/* prototype for functions used for dynamic memory allocation */
i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
					    struct i40e_dma_mem *mem,
iavf_status i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
				  enum i40e_memory_type type,
				  u64 size, u32 alignment);
i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
					struct i40e_dma_mem *mem);
i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
					     struct i40e_virt_mem *mem,
					     u32 size);
i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
					 struct i40e_virt_mem *mem);
iavf_status i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem);
iavf_status i40e_allocate_virt_mem(struct i40e_hw *hw,
				   struct i40e_virt_mem *mem, u32 size);
iavf_status i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem);

#endif /* _I40E_ALLOC_H_ */
+25 −27
Original line number Diff line number Diff line
@@ -13,9 +13,9 @@
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
i40e_status i40e_set_mac_type(struct i40e_hw *hw)
iavf_status i40e_set_mac_type(struct i40e_hw *hw)
{
	i40e_status status = 0;
	iavf_status status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
@@ -56,8 +56,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
		  hw->mac.type, status);
	hw_dbg(hw, "found mac: %d, returns: %d\n", hw->mac.type, status);
	return status;
}

@@ -126,7 +125,7 @@ const char *iavf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *iavf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
const char *iavf_stat_str(struct i40e_hw *hw, iavf_status stat_err)
{
	switch (stat_err) {
	case 0:
@@ -285,7 +284,7 @@ void iavf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u8 *buf = (u8 *)buffer;

	if ((!(mask & hw->debug_mask)) || (desc == NULL))
	if ((!(mask & hw->debug_mask)) || !desc)
		return;

	i40e_debug(hw, mask,
@@ -304,7 +303,7 @@ void iavf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
	if (buffer && aq_desc->datalen) {
		u16 len = le16_to_cpu(aq_desc->datalen);

		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
@@ -349,16 +348,14 @@ bool iavf_check_asq_alive(struct i40e_hw *hw)
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
i40e_status iavf_aq_queue_shutdown(struct i40e_hw *hw,
				   bool unloading)
iavf_status iavf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	i40e_status status;
	iavf_status status;

	iavf_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);
	iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
@@ -378,12 +375,12 @@ i40e_status iavf_aq_queue_shutdown(struct i40e_hw *hw,
 *
 * Internal function to get or set RSS look up table
 **/
static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
static iavf_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
					   u16 vsi_id, bool pf_lut,
					   u8 *lut, u16 lut_size,
					   bool set)
{
	i40e_status status;
	iavf_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
@@ -431,7 +428,7 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
 *
 * get the RSS lookup table, PF or VSI type
 **/
i40e_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
iavf_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
@@ -448,7 +445,7 @@ i40e_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
 *
 * set the RSS lookup table, PF or VSI type
 **/
i40e_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
iavf_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
@@ -463,11 +460,12 @@ i40e_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
 *
 * get the RSS key per VSI
 **/
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id,
static
iavf_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id,
				    struct i40e_aqc_get_set_rss_key_data *key,
				    bool set)
{
	i40e_status status;
	iavf_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
@@ -502,7 +500,7 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id,
 * @key: pointer to key info struct
 *
 **/
i40e_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id,
iavf_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
@@ -516,7 +514,7 @@ i40e_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id,
 *
 * set the RSS key per VSI
 **/
i40e_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 vsi_id,
iavf_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
@@ -900,14 +898,14 @@ struct i40e_rx_ptype_decoded iavf_ptype_lookup[] = {
 * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for
 * completion before returning.
 **/
i40e_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw,
iavf_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw,
				   enum virtchnl_ops v_opcode,
				   i40e_status v_retval, u8 *msg, u16 msglen,
				   iavf_status v_retval, u8 *msg, u16 msglen,
				   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_asq_cmd_details details;
	i40e_status status;
	struct i40e_aq_desc desc;
	iavf_status status;

	iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
@@ -971,7 +969,7 @@ void iavf_vf_parse_hw_config(struct i40e_hw *hw,
 * as none will be forthcoming. Immediately after calling this function,
 * the admin queue should be shut down and (optionally) reinitialized.
 **/
i40e_status iavf_vf_reset(struct i40e_hw *hw)
iavf_status iavf_vf_reset(struct i40e_hw *hw)
{
	return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
				      0, NULL, 0, NULL);
+1 −1
Original line number Diff line number Diff line
@@ -48,5 +48,5 @@ struct i40e_virt_mem {
extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
	__attribute__ ((format(gnu_printf, 3, 4)));

typedef enum i40e_status_code i40e_status;
typedef enum i40e_status_code iavf_status;
#endif /* _I40E_OSDEP_H_ */
+14 −15
Original line number Diff line number Diff line
@@ -16,14 +16,13 @@
 */

/* adminq functions */
i40e_status iavf_init_adminq(struct i40e_hw *hw);
i40e_status iavf_shutdown_adminq(struct i40e_hw *hw);
iavf_status iavf_init_adminq(struct i40e_hw *hw);
iavf_status iavf_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status iavf_clean_arq_element(struct i40e_hw *hw,
iavf_status iavf_clean_arq_element(struct i40e_hw *hw,
				   struct i40e_arq_event_info *e,
				   u16 *events_pending);
i40e_status iavf_asq_send_command(struct i40e_hw *hw,
				  struct i40e_aq_desc *desc,
iavf_status iavf_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
				  void *buff, /* can be NULL */
				  u16 buff_size,
				  struct i40e_asq_cmd_details *cmd_details);
@@ -36,20 +35,20 @@ void iavf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
void iavf_resume_aq(struct i40e_hw *hw);
bool iavf_check_asq_alive(struct i40e_hw *hw);
i40e_status iavf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
iavf_status iavf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *iavf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *iavf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
const char *iavf_stat_str(struct i40e_hw *hw, iavf_status stat_err);

i40e_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
iavf_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
				bool pf_lut, u8 *lut, u16 lut_size);
i40e_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
iavf_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
				bool pf_lut, u8 *lut, u16 lut_size);
i40e_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 seid,
iavf_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 seid,
				struct i40e_aqc_get_set_rss_key_data *key);
i40e_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 seid,
iavf_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 seid,
				struct i40e_aqc_get_set_rss_key_data *key);

i40e_status i40e_set_mac_type(struct i40e_hw *hw);
iavf_status i40e_set_mac_type(struct i40e_hw *hw);

extern struct i40e_rx_ptype_decoded iavf_ptype_lookup[];

@@ -61,9 +60,9 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
/* i40e_common for VF drivers*/
void iavf_vf_parse_hw_config(struct i40e_hw *hw,
			     struct virtchnl_vf_resource *msg);
i40e_status iavf_vf_reset(struct i40e_hw *hw);
i40e_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw,
iavf_status iavf_vf_reset(struct i40e_hw *hw);
iavf_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw,
				   enum virtchnl_ops v_opcode,
				   i40e_status v_retval, u8 *msg, u16 msglen,
				   iavf_status v_retval, u8 *msg, u16 msglen,
				   struct i40e_asq_cmd_details *cmd_details);
#endif /* _I40E_PROTOTYPE_H_ */
Loading