Commit 71fe89ce authored by Christoph Hellwig, committed by Will Deacon
Browse files

dma-iommu: remove __iommu_dma_mmap



The function has a single caller, so open code it there and take
advantage of the precalculated page count variable.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20201209112019.2625029-1-hch@lst.de


Signed-off-by: Will Deacon <will@kernel.org>
parent fefe8527
Loading
Loading
Loading
Loading
+1 −16
Original line number Diff line number Diff line
@@ -752,21 +752,6 @@ out_free_pages:
	return NULL;
}

/**
 * __iommu_dma_mmap - Insert a buffer's pages into a userspace VMA
 * @pages: Page array backing the buffer, as built by __iommu_dma_alloc()
 * @size: Buffer size in bytes (page-aligned internally)
 * @vma: Target userspace mapping
 *
 * Installs every page of @pages into @vma via vm_map_pages(). The caller
 * must have already validated @vma's size and protection beforehand.
 *
 * Return: the result of vm_map_pages().
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	return vm_map_pages(vma, pages, count);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
@@ -1287,7 +1272,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
			return vm_map_pages(vma, pages, nr_pages);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));