Commit 6f43bae3 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'dma-mapping-5.7' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - fix an integer overflow in the coherent pool (Kevin Grandemange)

 - provide support for in-place uncached remapping and use that for
   openrisc

 - fix the arm coherent allocator to take the bus limit into account

* tag 'dma-mapping-5.7' of git://git.infradead.org/users/hch/dma-mapping:
  ARM/dma-mapping: merge __dma_supported into arm_dma_supported
  ARM/dma-mapping: take the bus limit into account in __dma_alloc
  ARM/dma-mapping: remove get_coherent_dma_mask
  openrisc: use the generic in-place uncached DMA allocator
  dma-direct: provide a arch_dma_clear_uncached hook
  dma-direct: make uncached_kernel_address more general
  dma-direct: consolidate the error handling in dma_direct_alloc_pages
  dma-direct: remove the cached_kernel_address hook
  dma-coherent: fix integer overflow in the reserved-memory dma allocation
parents 1e396a5d fd27a526
Loading
Loading
Loading
Loading
+11 −4
Original line number Original line Diff line number Diff line
@@ -248,11 +248,18 @@ config ARCH_HAS_SET_DIRECT_MAP
	bool
	bool


#
#
# Select if arch has an uncached kernel segment and provides the
# Select if the architecture provides the arch_dma_set_uncached symbol to
# uncached_kernel_address / cached_kernel_address symbols to use it
# either provide an uncached segment alias for a DMA allocation, or
# to remap the page tables in place.
#
#
config ARCH_HAS_UNCACHED_SEGMENT
config ARCH_HAS_DMA_SET_UNCACHED
	select ARCH_HAS_DMA_PREP_COHERENT
	bool

#
# Select if the architecture provides the arch_dma_clear_uncached symbol
# to undo an in-place page table remap for uncached access.
#
config ARCH_HAS_DMA_CLEAR_UNCACHED
	bool
	bool


# Select if arch init_task must go in the __init_task_data section
# Select if arch init_task must go in the __init_task_data section
+0 −2
Original line number Original line Diff line number Diff line
@@ -33,7 +33,5 @@ int arm_iommu_attach_device(struct device *dev,
					struct dma_iommu_mapping *mapping);
					struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);
void arm_iommu_detach_device(struct device *dev);


int arm_dma_supported(struct device *dev, u64 mask);

#endif /* __KERNEL__ */
#endif /* __KERNEL__ */
#endif
#endif
+18 −58
Original line number Original line Diff line number Diff line
@@ -179,6 +179,23 @@ static void arm_dma_sync_single_for_device(struct device *dev,
	__dma_page_cpu_to_dev(page, offset, size, dir);
	__dma_page_cpu_to_dev(page, offset, size, dir);
}
}


/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
static int arm_dma_supported(struct device *dev, u64 mask)
{
	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	return dma_to_pfn(dev, mask) >= max_dma_pfn;
}

const struct dma_map_ops arm_dma_ops = {
const struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.free			= arm_dma_free,
@@ -219,49 +236,6 @@ const struct dma_map_ops arm_coherent_dma_ops = {
};
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
EXPORT_SYMBOL(arm_coherent_dma_ops);


static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
{
	/*
	/*
@@ -688,7 +662,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
			 unsigned long attrs, const void *caller)
{
{
	u64 mask = get_coherent_dma_mask(dev);
	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	struct page *page = NULL;
	struct page *page = NULL;
	void *addr;
	void *addr;
	bool allowblock, cma;
	bool allowblock, cma;
@@ -712,9 +686,6 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
	}
	}
#endif
#endif


	if (!mask)
		return NULL;

	buf = kzalloc(sizeof(*buf),
	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
	if (!buf)
@@ -1087,17 +1058,6 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
					    dir);
					    dir);
}
}


/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int arm_dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}

static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
{
	/*
	/*
+1 −1
Original line number Original line Diff line number Diff line
@@ -8,7 +8,7 @@ config MICROBLAZE
	select ARCH_HAS_GCOV_PROFILE_ALL
	select ARCH_HAS_GCOV_PROFILE_ALL
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select ARCH_HAS_UNCACHED_SEGMENT if !MMU
	select ARCH_HAS_DMA_SET_UNCACHED if !MMU
	select ARCH_MIGHT_HAVE_PC_PARPORT
	select ARCH_MIGHT_HAVE_PC_PARPORT
	select ARCH_WANT_IPC_PARSE_VERSION
	select ARCH_WANT_IPC_PARSE_VERSION
	select BUILDTIME_TABLE_SORT
	select BUILDTIME_TABLE_SORT
+1 −8
Original line number Original line Diff line number Diff line
@@ -40,7 +40,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
#define UNCACHED_SHADOW_MASK 0
#define UNCACHED_SHADOW_MASK 0
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */


void *uncached_kernel_address(void *ptr)
void *arch_dma_set_uncached(void *ptr, size_t size)
{
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long addr = (unsigned long)ptr;


@@ -49,11 +49,4 @@ void *uncached_kernel_address(void *ptr)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
	return (void *)addr;
	return (void *)addr;
}
}

void *cached_kernel_address(void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	return (void *)(addr & ~UNCACHED_SHADOW_MASK);
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_MMU */
Loading