Commit 9a4ab94a authored by Christoph Hellwig, committed by Joerg Roedel

iommu/dma: Merge the CMA and alloc_pages allocation paths



Instead of having separate code paths for the non-blocking alloc_pages
and CMA allocations, merge them into one.  There is a slight behavior
change here in that we try the page allocator if CMA fails.  This
matches what dma-direct and other iommu drivers do and will be needed
to use the dma-iommu code on architectures without DMA remapping later
on.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 8680aa5a
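
The merged path boils down to "try CMA for blocking requests, fall back to the
page allocator", with a release side that no longer needs to remember which
allocator succeeded.  Below is a minimal kernel-style sketch of that pattern,
assuming roughly the interfaces of this era; the wrapper names
alloc_contiguous_pages()/free_contiguous_pages() are hypothetical and exist
only to isolate the pattern, while the callees (dma_alloc_from_contiguous(),
alloc_pages(), dma_release_from_contiguous(), __free_pages()) are the real
functions used in the diff below.

/*
 * Minimal sketch of the merged allocation pattern (not the upstream code).
 * The wrapper names are hypothetical; the callees are the kernel interfaces
 * the patch uses.
 */
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/gfp.h>

static struct page *alloc_contiguous_pages(struct device *dev, size_t size,
					    gfp_t gfp)
{
	struct page *page = NULL;

	/* CMA may block, so only try it for blocking requests ... */
	if (gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size),
						 gfp & __GFP_NOWARN);
	/* ... and fall back to the normal page allocator if CMA fails. */
	if (!page)
		page = alloc_pages(gfp, get_order(size));
	return page;
}

static void free_contiguous_pages(struct device *dev, struct page *page,
				  size_t size)
{
	/* dma_release_from_contiguous() returns false for non-CMA pages. */
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, get_order(size));
}

Because dma_release_from_contiguous() reports whether the page belonged to
CMA, the caller does not have to track where the allocation came from; this
is why the out_free_pages error path in the last hunk now checks its return
value before calling __free_pages().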
+12 −20
@@ -974,7 +974,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 	size_t iosize = size;
-	struct page *page;
+	struct page *page = NULL;
 	void *addr;

 	size = PAGE_ALIGN(size);
@@ -984,35 +984,26 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
 		return iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);

-	if (!gfpflags_allow_blocking(gfp)) {
-		/*
-		 * In atomic context we can't remap anything, so we'll only
-		 * get the virtually contiguous buffer we need by way of a
-		 * physically contiguous allocation.
-		 */
-		if (coherent) {
-			page = alloc_pages(gfp, get_order(size));
-			addr = page ? page_address(page) : NULL;
-		} else {
-			addr = dma_alloc_from_pool(size, &page, gfp);
-		}
+	if (!gfpflags_allow_blocking(gfp) && !coherent) {
+		addr = dma_alloc_from_pool(size, &page, gfp);
 		if (!addr)
 			return NULL;

 		*handle = __iommu_dma_map(dev, page_to_phys(page), iosize,
 					  ioprot);
 		if (*handle == DMA_MAPPING_ERROR) {
-			if (coherent)
-				__free_pages(page, get_order(size));
-			else
-				dma_free_from_pool(addr, size);
+			dma_free_from_pool(addr, size);
 			return NULL;
 		}
 		return addr;
 	}

-	page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-					 get_order(size), gfp & __GFP_NOWARN);
+	if (gfpflags_allow_blocking(gfp))
+		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+						 get_order(size),
+						 gfp & __GFP_NOWARN);
+	if (!page)
+		page = alloc_pages(gfp, get_order(size));
 	if (!page)
 		return NULL;

@@ -1038,7 +1029,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 out_unmap:
 	__iommu_dma_unmap(dev, *handle, iosize);
 out_free_pages:
-	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, get_order(size));
 	return NULL;
 }