Commit e8d39a90 authored by Christoph Hellwig's avatar Christoph Hellwig
Browse files

dma-iommu: implement ->alloc_noncoherent



Implement the alloc_noncoherent method to provide memory that is neither
coherent not contiguous.

Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
parent de7cf917
Loading
Loading
Loading
Loading
+37 −4
Original line number Diff line number Diff line
@@ -572,6 +572,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
@@ -580,14 +581,14 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
@@ -1030,8 +1031,10 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
@@ -1052,6 +1055,34 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
	return cpu_addr;
}

#ifdef CONFIG_DMA_REMAP
static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
{
	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;

		page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
		if (!page)
			return NULL;
		return page_address(page);
	}

	return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
				     PAGE_KERNEL, 0);
}

static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
#else
#define iommu_dma_alloc_noncoherent		NULL
#define iommu_dma_free_noncoherent		NULL
#endif /* CONFIG_DMA_REMAP */

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
@@ -1122,6 +1153,8 @@ static const struct dma_map_ops iommu_dma_ops = {
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
	.alloc_noncoherent	= iommu_dma_alloc_noncoherent,
	.free_noncoherent	= iommu_dma_free_noncoherent,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,