Commit 80e61fcd authored by Christoph Hellwig
Browse files

arc: remove the partial DMA_ATTR_NON_CONSISTENT support



The arc DMA code supports DMA_ATTR_NON_CONSISTENT allocations, but does
not provide a cache_sync operation.  This means any user of it will
never be able to actually transfer cache ownership and thus cause
coherency bugs.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Evgeniy Paltsev <paltsev@synopsys.com>
Tested-by: Evgeniy Paltsev <paltsev@synopsys.com>
parent 34ab0316
Loading
Loading
Loading
Loading
+6 −15
Original line number Diff line number Diff line
@@ -24,7 +24,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);

	/*
	 * __GFP_HIGHMEM flag is cleared by upper layer functions
@@ -46,15 +45,11 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
	 * A coherent buffer needs MMU mapping to enforce non-cachability.
	 * kvaddr is kernel Virtual address (0x7000_0000 based).
	 */
	if (need_coh) {
	kvaddr = ioremap_nocache(paddr, size);
	if (kvaddr == NULL) {
		__free_pages(page, order);
		return NULL;
	}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
@@ -66,9 +61,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	if (need_coh)
	dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

@@ -78,9 +71,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);

	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
	iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}