Commit ee1ef05d authored by Christoph Hellwig, committed by Joerg Roedel

iommu/dma: Refactor iommu_dma_alloc, part 2



All the logic in iommu_dma_alloc that deals with page allocation from
the CMA or page allocators can be split into a self-contained helper,
and we can then map the result of that or of the atomic pool allocation
with the IOMMU later.  This also allows reusing __iommu_dma_free to
tear down the allocations and MMU mappings when the IOMMU mapping
fails.

Based on a patch from Robin Murphy.
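In outline, the result is easiest to see in the new iommu_dma_alloc();
here is an annotated copy of the function as added by the diff below,
with comments added for this write-up:

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	/* Blocking, non-forced-contiguous allocations keep using the
	 * scatter/remap path and never reach the code below. */
	if (gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	/* Step 1: get backing memory, either from the atomic pool (for
	 * non-blocking, non-coherent callers) or from the new helper
	 * that wraps CMA and the page allocator. */
	if (!gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	/* Step 2: create the IOMMU mapping, now shared by both paths;
	 * on failure __iommu_dma_free() undoes either kind of
	 * allocation. */
	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}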

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 9ad5d6ed
drivers/iommu/dma-iommu.c  +35 −30
@@ -972,35 +972,14 @@ static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	__iommu_dma_free(dev, size, cpu_addr);
 }
 
-static void *iommu_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
+		struct page **pagep, gfp_t gfp, unsigned long attrs)
 {
 	bool coherent = dev_is_dma_coherent(dev);
-	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 	size_t alloc_size = PAGE_ALIGN(size);
 	struct page *page = NULL;
 	void *cpu_addr;
 
-	gfp |= __GFP_ZERO;
-
-	if (gfpflags_allow_blocking(gfp) &&
-	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
-		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
-
-	if (!gfpflags_allow_blocking(gfp) && !coherent) {
-		cpu_addr = dma_alloc_from_pool(alloc_size, &page, gfp);
-		if (!cpu_addr)
-			return NULL;
-
-		*handle = __iommu_dma_map(dev, page_to_phys(page), size,
-					  ioprot);
-		if (*handle == DMA_MAPPING_ERROR) {
-			dma_free_from_pool(cpu_addr, alloc_size);
-			return NULL;
-		}
-		return cpu_addr;
-	}
-
 	if (gfpflags_allow_blocking(gfp))
 		page = dma_alloc_from_contiguous(dev, alloc_size >> PAGE_SHIFT,
 						 get_order(alloc_size),
@@ -1010,33 +989,59 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
-	if (*handle == DMA_MAPPING_ERROR)
-		goto out_free_pages;
-
 	if (!coherent || PageHighMem(page)) {
 		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
 				VM_USERMAP, prot, __builtin_return_address(0));
 		if (!cpu_addr)
-			goto out_unmap;
+			goto out_free_pages;
 
 		if (!coherent)
 			arch_dma_prep_coherent(page, size);
 	} else {
 		cpu_addr = page_address(page);
 	}
 
+	*pagep = page;
 	memset(cpu_addr, 0, alloc_size);
 	return cpu_addr;
-out_unmap:
-	__iommu_dma_unmap(dev, *handle, size);
 out_free_pages:
 	if (!dma_release_from_contiguous(dev, page, alloc_size >> PAGE_SHIFT))
 		__free_pages(page, get_order(alloc_size));
 	return NULL;
 }
 
+static void *iommu_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+{
+	bool coherent = dev_is_dma_coherent(dev);
+	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+	struct page *page = NULL;
+	void *cpu_addr;
+
+	gfp |= __GFP_ZERO;
+
+	if (gfpflags_allow_blocking(gfp) &&
+	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
+		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
+
+	if (!gfpflags_allow_blocking(gfp) && !coherent)
+		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+	else
+		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
+	if (!cpu_addr)
+		return NULL;
+
+	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+	if (*handle == DMA_MAPPING_ERROR) {
+		__iommu_dma_free(dev, size, cpu_addr);
+		return NULL;
+	}
+
+	return cpu_addr;
+}
+
 static int __iommu_dma_mmap_pfn(struct vm_area_struct *vma,
 			      unsigned long pfn, size_t size)
 {
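For context, this allocator sits behind the generic DMA API rather than
being called directly. A hypothetical driver fragment (illustrative
only; the function name and device setup are invented) would reach the
refactored path through dma_alloc_attrs():

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical driver code: on a device attached to the dma-iommu ops,
 * this ends up in iommu_dma_alloc() above. */
static int example_alloc_buffer(struct device *dev)
{
	dma_addr_t dma;
	void *buf;

	/* With GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS steers the
	 * allocation into iommu_dma_alloc_pages() instead of the
	 * remap path. */
	buf = dma_alloc_attrs(dev, SZ_64K, &dma, GFP_KERNEL,
			      DMA_ATTR_FORCE_CONTIGUOUS);
	if (!buf)
		return -ENOMEM;

	/* ... program the device with the dma handle, use buf ... */

	dma_free_attrs(dev, SZ_64K, buf, dma, DMA_ATTR_FORCE_CONTIGUOUS);
	return 0;
}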