Commit 58b04406 authored by Christoph Hellwig's avatar Christoph Hellwig
Browse files

dma-mapping: consolidate the dma mmap implementations



The only functional difference (modulo a few missing fixes in the arch
code) is that architectures without coherent caches need a hook to
convert a virtual or DMA address into a pfn, given that we don't have
the kernel linear mapping available for the otherwise easy virt_to_page
call.  As a side effect we can support mmap of the per-device coherent
area even on architectures not providing the callback, and we make the
previously dangerous default method dma_common_mmap actually safe for
non-coherent architectures by rejecting it without the right helper.

In addition to that we need a hook so that some architectures can
override the protection bits when mmapping a DMA coherent allocation.

Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
Acked-by: Paul Burton <paul.burton@mips.com> # MIPS parts
parent bc3ec75d
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@
config ARC
	def_bool y
	select ARC_TIMERS
	select ARCH_HAS_DMA_COHERENT_TO_PFN
	select ARCH_HAS_PTE_SPECIAL
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -18,7 +19,6 @@ config ARC
	select CLONE_BACKWARDS
	select COMMON_CLK
	select DMA_DIRECT_OPS
	select DMA_NONCOHERENT_MMAP
	select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
	select GENERIC_CLOCKEVENTS
	select GENERIC_FIND_FIRST_BIT
+3 −22
Original line number Diff line number Diff line
@@ -84,29 +84,10 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
	__free_pages(page, get_order(size));
}

int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(dma_addr);
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
	return __phys_to_pfn(dma_addr);
}

/*
+1 −1
Original line number Diff line number Diff line
@@ -91,7 +91,7 @@ static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}


+1 −1
Original line number Diff line number Diff line
config MICROBLAZE
	def_bool y
	select ARCH_NO_SWAP
	select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
	select ARCH_HAS_GCOV_PROFILE_ALL
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -12,7 +13,6 @@ config MICROBLAZE
	select CLONE_BACKWARDS3
	select COMMON_CLK
	select DMA_DIRECT_OPS
	select DMA_NONCOHERENT_MMAP
	select GENERIC_ATOMIC64
	select GENERIC_CLOCKEVENTS
	select GENERIC_CPU_DEVICES
+0 −2
Original line number Diff line number Diff line
@@ -553,8 +553,6 @@ void __init *early_get_page(void);

extern unsigned long ioremap_bot, ioremap_base;

unsigned long consistent_virt_to_pfn(void *vaddr);

void setup_memory(void);
#endif /* __ASSEMBLY__ */

Loading