Commit 27b7fb1a authored by Jason Gunthorpe, committed by Doug Ledford
Browse files

RDMA/mlx5: Fix MR npages calculation for IB_ACCESS_HUGETLB



When ODP is enabled with IB_ACCESS_HUGETLB then the required pages
should be calculated based on the extent of the MR, which is rounded
to the nearest huge page alignment.

Fixes: d2183c6f ("RDMA/umem: Move page_shift from ib_umem to ib_odp_umem")
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Link: https://lore.kernel.org/r/20190815083834.9245-5-leon@kernel.org


Signed-off-by: Doug Ledford <dledford@redhat.com>
parent d1abaeb3
Loading
Loading
Loading
Loading
+1 −6
Original line number Diff line number Diff line
@@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	int i;
	int n;
	int i, n = 0;
	struct scatterlist *sg;

	if (umem->is_odp)
		return ib_umem_num_pages(umem);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> PAGE_SHIFT;

+3 −2
Original line number Diff line number Diff line
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
	int entry;

	if (umem->is_odp) {
		unsigned int page_shift = to_ib_umem_odp(umem)->page_shift;
		struct ib_umem_odp *odp = to_ib_umem_odp(umem);
		unsigned int page_shift = odp->page_shift;

		*ncont = ib_umem_page_count(umem);
		*ncont = ib_umem_odp_num_pages(odp);
		*count = *ncont << (page_shift - PAGE_SHIFT);
		*shift = page_shift;
		if (order)