Commit 3e7e1193 authored by Artemy Kovalyov's avatar Artemy Kovalyov Committed by Doug Ledford
Browse files

IB: Replace ib_umem page_size by page_shift



The page size is held by struct ib_umem in the page_size field.

It is better to store it as an exponent, because a page size is by nature
always a power of two and is used as a factor, divisor, or ilog2's argument.

Converting page_size to page_shift makes the code portable and avoids
the following error when compiling on ARM:

  ERROR: "__aeabi_uldivmod" [drivers/infiniband/core/ib_core.ko] undefined!

CC: Selvin Xavier <selvin.xavier@broadcom.com>
CC: Steve Wise <swise@chelsio.com>
CC: Lijun Ou <oulijun@huawei.com>
CC: Shiraz Saleem <shiraz.saleem@intel.com>
CC: Adit Ranadive <aditr@vmware.com>
CC: Dennis Dalessandro <dennis.dalessandro@intel.com>
CC: Ram Amrani <Ram.Amrani@Cavium.com>
Signed-off-by: default avatarArtemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: default avatarLeon Romanovsky <leon@kernel.org>
Acked-by: default avatarRam Amrani <Ram.Amrani@cavium.com>
Acked-by: default avatarShiraz Saleem <shiraz.saleem@intel.com>
Acked-by: default avatarSelvin Xavier <selvin.xavier@broadcom.com>
Acked-by: default avatarAdit Ranadive <aditr@vmware.com>
Signed-off-by: default avatarDoug Ledford <dledford@redhat.com>
parent 8d2216be
Loading
Loading
Loading
Loading
+6 −9
Original line number Diff line number Diff line
@@ -118,7 +118,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
	umem->context    = context;
	umem->length     = size;
	umem->address    = addr;
-	umem->page_size = PAGE_SIZE;
+	umem->page_shift = PAGE_SHIFT;
	umem->pid	 = get_task_pid(current, PIDTYPE_PID);
	/*
	 * We ask for writable memory if any of the following
@@ -315,7 +315,6 @@ EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
-	int shift;
	int i;
	int n;
	struct scatterlist *sg;
@@ -323,11 +322,9 @@ int ib_umem_page_count(struct ib_umem *umem)
	if (umem->odp_data)
		return ib_umem_num_pages(umem);

-	shift = ilog2(umem->page_size);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
-		n += sg_dma_len(sg) >> shift;
+		n += sg_dma_len(sg) >> umem->page_shift;

	return n;
}
+6 −6
Original line number Diff line number Diff line
@@ -257,7 +257,7 @@ struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
	umem->context    = context;
	umem->length     = size;
	umem->address    = addr;
-	umem->page_size = PAGE_SIZE;
+	umem->page_shift = PAGE_SHIFT;
	umem->writable   = 1;

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
@@ -707,7 +707,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem->odp_data->umem_mutex);
-	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
+	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
		if (umem->odp_data->page_list[idx]) {
			struct page *page = umem->odp_data->page_list[idx];
+6 −6
Original line number Diff line number Diff line
@@ -3016,7 +3016,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
	struct bnxt_re_mr *mr;
	struct ib_umem *umem;
	u64 *pbl_tbl, *pbl_tbl_orig;
-	int i, umem_pgs, pages, page_shift, rc;
+	int i, umem_pgs, pages, rc;
	struct scatterlist *sg;
	int entry;

@@ -3062,22 +3062,22 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
	}
	pbl_tbl_orig = pbl_tbl;

-	page_shift = ilog2(umem->page_size);
	if (umem->hugetlb) {
		dev_err(rdev_to_dev(rdev), "umem hugetlb not supported!");
		rc = -EFAULT;
		goto fail;
	}
-	if (umem->page_size != PAGE_SIZE) {
-		dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
+
+	if (umem->page_shift != PAGE_SHIFT) {
+		dev_err(rdev_to_dev(rdev), "umem page shift unsupported!");
		rc = -EFAULT;
		goto fail;
	}
	/* Map umem buf ptrs to the PBL */
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		pages = sg_dma_len(sg) >> page_shift;
+		pages = sg_dma_len(sg) >> umem->page_shift;
		for (i = 0; i < pages; i++, pbl_tbl++)
-			*pbl_tbl = sg_dma_address(sg) + (i << page_shift);
+			*pbl_tbl = sg_dma_address(sg) + (i << umem->page_shift);
	}
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl_orig,
			       umem_pgs, false);
+2 −2
Original line number Diff line number Diff line
@@ -581,7 +581,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
		return ERR_PTR(err);
	}

-	shift = ffs(mhp->umem->page_size) - 1;
+	shift = mhp->umem->page_shift;

	n = mhp->umem->nmap;

@@ -601,7 +601,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			len = sg_dma_len(sg) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-					mhp->umem->page_size * k);
+							 (k << shift));
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
+2 −2
Original line number Diff line number Diff line
@@ -515,7 +515,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
		return ERR_PTR(err);
	}

-	shift = ffs(mhp->umem->page_size) - 1;
+	shift = mhp->umem->page_shift;

	n = mhp->umem->nmap;
	err = alloc_pbl(mhp, n);
@@ -534,7 +534,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-				mhp->umem->page_size * k);
+						 (k << shift));
			if (i == PAGE_SIZE / sizeof *pages) {
				err = write_pbl(&mhp->rhp->rdev,
				      pages,
Loading