Commit 2591c280 authored by Joe Perches, committed by David S. Miller
Browse files

qed: Remove OOM messages



These messages are unnecessary as OOM allocation failures already do
a dump_stack() giving more or less the same information.

$ size drivers/net/ethernet/qlogic/qed/built-in.o* (defconfig x86-64)
   text	   data	    bss	    dec	    hex	filename
 127817	  27969	  32800	 188586	  2e0aa	drivers/net/ethernet/qlogic/qed/built-in.o.new
 132474	  27969	  32800	 193243	  2f2db	drivers/net/ethernet/qlogic/qed/built-in.o.old

Miscellanea:

o Change allocs to the generally preferred forms where possible.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c7ee5672
Loading
Loading
Loading
Loading
+5 −15
Original line number Diff line number Diff line
@@ -792,10 +792,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
	p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);

	/* allocate t2 */
	p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem),
	p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
			     GFP_KERNEL);
	if (!p_mngr->t2) {
		DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
		rc = -ENOMEM;
		goto t2_fail;
	}
@@ -957,7 +956,6 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
	p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
				     GFP_KERNEL);
	if (!p_mngr->ilt_shadow) {
		DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
		rc = -ENOMEM;
		goto ilt_shadow_fail;
	}
@@ -1050,10 +1048,8 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
	u32 i;

	p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
	if (!p_mngr) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
	if (!p_mngr)
		return -ENOMEM;
	}

	/* Initialize ILT client registers */
	clients = p_mngr->clients;
@@ -1105,24 +1101,18 @@ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)

	/* Allocate the ILT shadow table */
	rc = qed_ilt_shadow_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
	if (rc)
		goto tables_alloc_fail;
	}

	/* Allocate the T2  table */
	rc = qed_cxt_src_t2_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n");
	if (rc)
		goto tables_alloc_fail;
	}

	/* Allocate and initialize the acquired cids bitmaps */
	rc = qed_cid_map_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
	if (rc)
		goto tables_alloc_fail;
	}

	return 0;

+3 −10
Original line number Diff line number Diff line
@@ -874,11 +874,8 @@ int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
	int rc = 0;

	p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
	if (!p_hwfn->p_dcbx_info) {
		DP_NOTICE(p_hwfn,
			  "Failed to allocate 'struct qed_dcbx_info'\n");
	if (!p_hwfn->p_dcbx_info)
		rc = -ENOMEM;
	}

	return rc;
}
@@ -1176,10 +1173,8 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
	}

	dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
	if (!dcbx_info) {
		DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
	if (!dcbx_info)
		return -ENOMEM;
	}

	rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
	if (rc) {
@@ -1213,10 +1208,8 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
	struct qed_dcbx_get *dcbx_info;

	dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
	if (!dcbx_info) {
		DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
	if (!dcbx_info)
		return NULL;
	}

	if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
		kfree(dcbx_info);
+15 −45
Original line number Diff line number Diff line
@@ -340,7 +340,6 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
	return 0;

alloc_err:
	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
	qed_qm_info_free(p_hwfn);
	return -ENOMEM;
}
@@ -424,19 +423,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);

		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
		if (!p_hwfn->p_tx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Tx Cids\n");
		if (!p_hwfn->p_tx_cids)
			goto alloc_no_mem;
		}

		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
		if (!p_hwfn->p_rx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Rx Cids\n");
		if (!p_hwfn->p_rx_cids)
			goto alloc_no_mem;
	}
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -522,26 +515,18 @@ int qed_resc_alloc(struct qed_dev *cdev)

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dmae_info structure\n");
		if (rc)
			goto alloc_err;
		}

		/* DCBX initialization */
		rc = qed_dcbx_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dcbx structure\n");
		if (rc)
			goto alloc_err;
	}
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats) {
		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
	if (!cdev->reset_stats)
		goto alloc_no_mem;
	}

	return 0;

@@ -1713,10 +1698,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
	if (rc)
		goto err0;
	}

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
@@ -1746,10 +1729,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
	if (rc)
		goto err2;
	}

	return rc;
err2:
@@ -1957,10 +1938,8 @@ qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt) {
			DP_NOTICE(cdev, "Failed to allocate chain memory\n");
		if (!p_virt)
			return -ENOMEM;
		}

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -1990,10 +1969,8 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)

	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(cdev, "Failed to allocate chain memory\n");
	if (!p_virt)
		return -ENOMEM;
	}

	qed_chain_init_mem(p_chain, p_virt, p_phys);
	qed_chain_reset(p_chain);
@@ -2010,13 +1987,9 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
	void *p_virt = NULL;

	size = page_cnt * sizeof(*pp_virt_addr_tbl);
	pp_virt_addr_tbl = vmalloc(size);
	if (!pp_virt_addr_tbl) {
		DP_NOTICE(cdev,
			  "Failed to allocate memory for the chain virtual addresses table\n");
	pp_virt_addr_tbl = vzalloc(size);
	if (!pp_virt_addr_tbl)
		return -ENOMEM;
	}
	memset(pp_virt_addr_tbl, 0, size);

	/* The allocation of the PBL table is done with its full size, since it
	 * is expected to be successive.
@@ -2029,19 +2002,15 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
					size, &p_pbl_phys, GFP_KERNEL);
	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
			       pp_virt_addr_tbl);
	if (!p_pbl_virt) {
		DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
	if (!p_pbl_virt)
		return -ENOMEM;
	}

	for (i = 0; i < page_cnt; i++) {
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt) {
			DP_NOTICE(cdev, "Failed to allocate chain memory\n");
		if (!p_virt)
			return -ENOMEM;
		}

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
@@ -2076,7 +2045,8 @@ int qed_chain_alloc(struct qed_dev *cdev,
	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
	if (rc) {
		DP_NOTICE(cdev,
			  "Cannot allocate a chain with the given arguments:\n"
			  "Cannot allocate a chain with the given arguments:\n");
		DP_NOTICE(cdev,
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
			  intended_use, mode, cnt_type, num_elems, elem_size);
		return rc;
+3 −9
Original line number Diff line number Diff line
@@ -482,28 +482,22 @@ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)

	*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32), p_addr, GFP_KERNEL);
	if (!*p_comp) {
		DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
	if (!*p_comp)
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    sizeof(struct dmae_cmd),
				    p_addr, GFP_KERNEL);
	if (!*p_cmd) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
	if (!*p_cmd)
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32) * DMAE_MAX_RW_SIZE,
				     p_addr, GFP_KERNEL);
	if (!*p_buff) {
		DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
	if (!*p_buff)
		goto err;
	}

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

+1 −3
Original line number Diff line number Diff line
@@ -460,10 +460,8 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
	init_ops = cdev->fw_data->init_ops;

	p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
	if (!p_hwfn->unzip_buf)
		return -ENOMEM;
	}

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
Loading