Commit 782e6769 authored by Christoph Hellwig's avatar Christoph Hellwig
Browse files

dma-mapping: provide a generic dma-noncoherent implementation



Add a new dma_map_ops implementation that uses dma-direct for the
address mapping of streaming mappings, and which requires arch-specific
implementations of coherent allocate/free.

Architectures have to provide flushing helpers for ownership transfers
to the device and/or CPU, and can provide optional implementations of
the coherent mmap functionality, and the cache_sync routines for
non-coherent long term allocations.

Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
Tested-by: default avatarAlexey Brodkin <abrodkin@synopsys.com>
Acked-by: default avatarVineet Gupta <vgupta@synopsys.com>
parent 35ddb69c
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -4334,12 +4334,14 @@ W: http://git.infradead.org/users/hch/dma-mapping.git
S:	Supported
F:	lib/dma-debug.c
F:	lib/dma-direct.c
F:	lib/dma-noncoherent.c
F:	lib/dma-virt.c
F:	drivers/base/dma-mapping.c
F:	drivers/base/dma-coherent.c
F:	include/asm-generic/dma-mapping.h
F:	include/linux/dma-direct.h
F:	include/linux/dma-mapping.h
F:	include/linux/dma-noncoherent.h

DME1737 HARDWARE MONITOR DRIVER
M:	Juerg Haefliger <juergh@gmail.com>
+9 −0
Original line number Diff line number Diff line
@@ -4,7 +4,16 @@

/*
 * Default dma_map_ops selection for architectures that do not override
 * get_arch_dma_ops.  The choice is made purely at compile time from the
 * kernel configuration; the @bus argument is accepted for interface
 * compatibility but not consulted here.
 */
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	/*
	 * Use the non-coherent ops if available.  If an architecture wants a
	 * more fine-grained selection of operations it will have to implement
	 * get_arch_dma_ops itself or use the per-device dma_ops.
	 */
#ifdef CONFIG_DMA_NONCOHERENT_OPS
	return &dma_noncoherent_ops;
#else
	return &dma_direct_ops;
#endif
}

#endif /* _ASM_GENERIC_DMA_MAPPING_H */
+6 −1
Original line number Diff line number Diff line
@@ -59,6 +59,11 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
/*
 * Streaming mapping helpers exported so that other dma_map_ops
 * implementations (e.g. dma_noncoherent_ops) can reuse the dma-direct
 * address translation.
 */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
/* Returns nonzero if dma-direct can satisfy the given DMA mask. */
int dma_direct_supported(struct device *dev, u64 mask);

int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr);
#endif /* _LINUX_DMA_DIRECT_H */
+1 −0
Original line number Diff line number Diff line
@@ -136,6 +136,7 @@ struct dma_map_ops {
};

extern const struct dma_map_ops dma_direct_ops;
/* dma-direct address mapping plus arch-provided cache maintenance hooks. */
extern const struct dma_map_ops dma_noncoherent_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+47 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_NONCOHERENT_H
#define _LINUX_DMA_NONCOHERENT_H 1

#include <linux/dma-mapping.h>

/*
 * Architecture hooks backing the generic dma_noncoherent_ops
 * implementation.  Coherent alloc/free are mandatory; the mmap,
 * cache_sync, and ownership-transfer (sync_*) hooks are selected by
 * the config symbols guarded below, with NULL / no-op fallbacks when
 * an architecture does not provide them.
 */

/* Required: arch-specific coherent (consistent) memory allocate/free. */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

/* Optional: userspace mapping of a coherent allocation. */
#ifdef CONFIG_DMA_NONCOHERENT_MMAP
int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
#else
#define arch_dma_mmap NULL
#endif /* CONFIG_DMA_NONCOHERENT_MMAP */

/* Optional: cache maintenance for non-coherent long term allocations. */
#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction);
#else
#define arch_dma_cache_sync NULL
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */

/* Ownership transfer to the device; no-op stub when not configured. */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(struct device *dev,
		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE */

/* Ownership transfer back to the CPU; no-op stub when not configured. */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(struct device *dev,
		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU */

#endif /* _LINUX_DMA_NONCOHERENT_H */
Loading