Commit 1eafa736 authored by David S. Miller
Browse files

Merge branch 'qed-warnings-cleanup'



Alexander Lobakin says:

====================
net: qed/qede: W=1 C=1 warnings cleanup

This set cleans qed/qede build log under W=1 C=1 with GCC 8 and
sparse 0.6.2. The only thing left is "context imbalance -- unexpected
unlock" in one of the source files, which will be issued later during
the refactoring cycles.

The biggest part is handling the endianness warnings. The current code
often just assumes that both host and device operate in LE, which is
obviously incorrect (despite the fact that it's true for x86 platforms),
and makes sparse {s,m}ad.

The rest of the series is mostly random non-functional fixes
here-and-there.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9e06e859 fd081662
Loading
Loading
Loading
Loading
+10 −4
Original line number Diff line number Diff line
@@ -73,8 +73,8 @@ union type1_task_context {
};

struct src_ent {
	u8 opaque[56];
	u64 next;
	__u8				opaque[56];
	__be64				next;
};

#define CDUT_SEG_ALIGNMET		3 /* in 4k chunks */
@@ -2170,12 +2170,14 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
			  enum qed_cxt_elem_type elem_type, u32 iid)
{
	u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
	struct tdif_task_context *tdif_context;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	struct qed_ptt *p_ptt;
	dma_addr_t p_phys;
	u64 ilt_hw_entry;
	void *p_virt;
	u32 flags1;
	int rc = 0;

	switch (elem_type) {
@@ -2252,8 +2254,12 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,

		for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
			elem = (union type1_task_context *)elem_start;
			SET_FIELD(elem->roce_ctx.tdif_context.flags1,
				  TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
			tdif_context = &elem->roce_ctx.tdif_context;

			flags1 = le32_to_cpu(tdif_context->flags1);
			SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
			tdif_context->flags1 = cpu_to_le32(flags1);

			elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
		}
	}
+13 −14
Original line number Diff line number Diff line
@@ -547,7 +547,8 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
		      struct dcbx_ets_feature *p_ets,
		      struct qed_dcbx_params *p_params)
{
	u32 bw_map[2], tsa_map[2], pri_map;
	__be32 bw_map[2], tsa_map[2];
	u32 pri_map;
	int i;

	p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags,
@@ -573,11 +574,10 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
	/* 8 bit tsa and bw data corresponding to each of the 8 TC's are
	 * encoded in a type u32 array of size 2.
	 */
	bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]);
	bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
	tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
	tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
	cpu_to_be32_array(bw_map, p_ets->tc_bw_tbl, 2);
	cpu_to_be32_array(tsa_map, p_ets->tc_tsa_tbl, 2);
	pri_map = p_ets->pri_tc_tbl[0];

	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
		p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
		p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@@ -1054,7 +1054,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
		      struct dcbx_ets_feature *p_ets,
		      struct qed_dcbx_params *p_params)
{
	u8 *bw_map, *tsa_map;
	__be32 bw_map[2], tsa_map[2];
	u32 val;
	int i;

@@ -1076,22 +1076,21 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
	p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
	p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;

	bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
	tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
	p_ets->pri_tc_tbl[0] = 0;

	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
		bw_map[i] = p_params->ets_tc_bw_tbl[i];
		tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
		((u8 *)bw_map)[i] = p_params->ets_tc_bw_tbl[i];
		((u8 *)tsa_map)[i] = p_params->ets_tc_tsa_tbl[i];

		/* Copy the priority value to the corresponding 4 bits in the
		 * traffic class table.
		 */
		val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
		p_ets->pri_tc_tbl[0] |= val;
	}
	for (i = 0; i < 2; i++) {
		p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
		p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
	}

	be32_to_cpu_array(p_ets->tc_bw_tbl, bw_map, 2);
	be32_to_cpu_array(p_ets->tc_tsa_tbl, tsa_map, 2);
}

static void
+2 −0
Original line number Diff line number Diff line
@@ -81,6 +81,8 @@ struct qed_dcbx_mib_meta_data {
	u32 addr;
};

extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;

#ifdef CONFIG_DCB
int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);

+30 −22
Original line number Diff line number Diff line
@@ -972,7 +972,7 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	struct fw_info_location fw_info_location;
	u32 addr, i, *dest;
	u32 addr, i, size, *dest;

	memset(&fw_info_location, 0, sizeof(fw_info_location));
	memset(fw_info, 0, sizeof(*fw_info));
@@ -985,20 +985,29 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
	    sizeof(fw_info_location);

	dest = (u32 *)&fw_info_location;
	size = BYTES_TO_DWORDS(sizeof(fw_info_location));

	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
	     i++, addr += BYTES_IN_DWORD)
	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);

	/* qed_rd() fetches data in CPU byteorder. Swap it back to
	 * the device's to get right structure layout.
	 */
	cpu_to_le32_array(dest, size);

	/* Read FW version info from Storm RAM */
	if (fw_info_location.size > 0 && fw_info_location.size <=
	    sizeof(*fw_info)) {
		addr = fw_info_location.grc_addr;
	size = le32_to_cpu(fw_info_location.size);
	if (!size || size > sizeof(*fw_info))
		return;

	addr = le32_to_cpu(fw_info_location.grc_addr);
	dest = (u32 *)fw_info;
		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
		     i++, addr += BYTES_IN_DWORD)
	size = BYTES_TO_DWORDS(size);

	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
	}

	cpu_to_le32_array(dest, size);
}

/* Dumps the specified string to the specified buffer.
@@ -1122,9 +1131,8 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
				     dump, "fw-version", fw_ver_str);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-image", fw_img_str);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump,
				     "fw-timestamp", fw_info.ver.timestamp);
	offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp",
				     le32_to_cpu(fw_info.ver.timestamp));

	return offset;
}
@@ -4441,9 +4449,11 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
			continue;
		}

		addr = le16_to_cpu(asserts->section_ram_line_offset);
		fw_asserts_section_addr = storm->sem_fast_mem_addr +
					  SEM_FAST_REG_INT_RAM +
			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
					  RAM_LINES_TO_BYTES(addr);

		next_list_idx_addr = fw_asserts_section_addr +
			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
@@ -7651,8 +7661,7 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
{
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	u32 len_rounded, i;
	__be32 val;
	u32 len_rounded;
	int rc;

	*num_dumped_bytes = 0;
@@ -7671,10 +7680,9 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,

	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
	if (image_id != QED_NVM_IMAGE_NVM_META)
		for (i = 0; i < len_rounded; i += 4) {
			val = cpu_to_be32(*(u32 *)(buffer + i));
			*(u32 *)(buffer + i) = val;
		}
		cpu_to_be32_array((__force __be32 *)buffer,
				  (const u32 *)buffer,
				  len_rounded / sizeof(u32));

	*num_dumped_bytes = len_rounded;

+28 −26
Original line number Diff line number Diff line
@@ -95,7 +95,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
	struct qed_cxt_info cxt_info;
	u32 dummy_cid;
	int rc = 0;
	u16 tmp;
	__le16 tmp;
	u8 i;

	/* Get SPQ entry */
@@ -162,17 +162,13 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
	tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
	p_data->q_params.cmdq_num_entries = tmp;

	tmp = fcoe_pf_params->num_cqs;
	p_data->q_params.num_queues = (u8)tmp;
	p_data->q_params.num_queues = fcoe_pf_params->num_cqs;

	tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (u8)tmp;
	tmp = (__force __le16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (__force u8)tmp;

	for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
		u16 igu_sb_id;

		igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
		tmp = cpu_to_le16(igu_sb_id);
		tmp = cpu_to_le16(qed_get_igu_sb_id(p_hwfn, i));
		p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
	}

@@ -185,21 +181,21 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]);
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = tmp;
	tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]);
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = tmp;

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->rq_buffer_size;
	p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);
	tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = tmp;
	tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = tmp;
	tmp = cpu_to_le16(fcoe_pf_params->rq_buffer_size);
	p_data->q_params.rq_buffer_size = tmp;

	if (fcoe_pf_params->is_target) {
		SET_FIELD(p_data->q_params.q_validity,
@@ -233,7 +229,8 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
	struct fcoe_conn_offload_ramrod_data *p_data;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 physical_q0, tmp;
	u16 physical_q0;
	__le16 tmp;
	int rc;

	/* Get SPQ entry */
@@ -254,7 +251,7 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,

	/* Transmission PQ is the first of the PF */
	physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = cpu_to_le16(physical_q0);
	p_conn->physical_q0 = physical_q0;
	p_data->physical_q0 = cpu_to_le16(physical_q0);

	p_data->conn_id = cpu_to_le16(p_conn->conn_id);
@@ -553,8 +550,8 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
	struct e4_fcoe_task_context *p_task_ctx = NULL;
	u32 i, lc;
	int rc;
	u32 i;

	spin_lock_init(&p_hwfn->p_fcoe_info->lock);
	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
@@ -565,10 +562,15 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
			continue;

		memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
			  TIMERS_CONTEXT_VALIDLC0, 1);
		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
			  TIMERS_CONTEXT_VALIDLC1, 1);

		lc = 0;
		SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1);
		p_task_ctx->timer_context.logical_client_0 = cpu_to_le32(lc);

		lc = 0;
		SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC1, 1);
		p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc);

		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
			  E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
	}
Loading