Commit f05baa06 authored by Linus Torvalds

Merge tag 'dma-mapping-5.8-4' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - fix dma coherent mmap in nommu (me)

 - more AMD SEV fallout (David Rientjes, me)

 - fix alignment in dma_common_*_remap (Eric Auger)

* tag 'dma-mapping-5.8-4' of git://git.infradead.org/users/hch/dma-mapping:
  dma-remap: align the size in dma_common_*_remap()
  dma-mapping: DMA_COHERENT_POOL should select GENERIC_ALLOCATOR
  dma-direct: add missing set_memory_decrypted() for coherent mapping
  dma-direct: check return value when encrypting or decrypting memory
  dma-direct: re-encrypt memory if dma_direct_alloc_pages() fails
  dma-direct: always align allocation size in dma_direct_alloc_pages()
  dma-direct: mark __dma_direct_alloc_pages static
  dma-direct: re-enable mmap for !CONFIG_MMU
parents 4e99b321 8e36baf9
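
The common thread in the SEV fixes below: dma_direct_alloc_pages() must check the return value of set_memory_decrypted(), and on any later failure it must re-encrypt the pages before freeing them; if even re-encryption fails, the pages are deliberately leaked rather than handed back to the page allocator in a decrypted state. A minimal sketch of that pattern, for orientation only (sev_alloc_sketch() and sev_unwind_sketch() are hypothetical helpers; the set_memory_*() and dma_*_contiguous() calls are the real APIs):

#include <linux/dma-contiguous.h>
#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/string.h>

static void *sev_alloc_sketch(struct device *dev, struct page *page,
			      size_t size)
{
	void *ret = page_address(page);

	if (force_dma_unencrypted(dev) &&
	    /* can fail under SEV; the old code ignored the result */
	    set_memory_decrypted((unsigned long)ret, 1 << get_order(size))) {
		/* nothing was decrypted, so freeing is still safe */
		dma_free_contiguous(dev, page, size);
		return NULL;
	}
	memset(ret, 0, size);
	return ret;
}

/* On a later error the pages must be re-encrypted before being freed. */
static void sev_unwind_sketch(struct device *dev, struct page *page,
			      size_t size)
{
	if (force_dma_unencrypted(dev) &&
	    set_memory_encrypted((unsigned long)page_address(page),
				 1 << get_order(size)))
		return;		/* re-encryption failed: leak on purpose */
	dma_free_contiguous(dev, page, size);
}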
include/linux/dma-direct.h +0 −2
@@ -77,8 +77,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-		gfp_t gfp, unsigned long attrs);
 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
kernel/dma/Kconfig +2 −1
@@ -71,15 +71,16 @@ config SWIOTLB
 # in the pagetables
 #
 config DMA_NONCOHERENT_MMAP
+	default y if !MMU
 	bool
 
 config DMA_COHERENT_POOL
+	select GENERIC_ALLOCATOR
 	bool
 
 config DMA_REMAP
 	bool
 	depends on MMU
-	select GENERIC_ALLOCATOR
 	select DMA_NONCOHERENT_MMAP
 
 config DMA_DIRECT_REMAP
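
Context for the Kconfig hunk above: the atomic pools behind DMA_COHERENT_POOL (kernel/dma/pool.c) are built on gen_pool from lib/genalloc.c, which is only compiled when GENERIC_ALLOCATOR is set. Previously the select lived on DMA_REMAP, so enabling DMA_COHERENT_POOL without DMA_REMAP left the gen_pool_*() symbols undefined at link time. A rough illustration of the dependency, not the actual pool.c code (the demo_pool_* names are hypothetical; the gen_pool_*() calls are real):

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/numa.h>

static struct gen_pool *demo_pool;	/* hypothetical pool */

static int demo_pool_init(void *vaddr, phys_addr_t phys, size_t size)
{
	demo_pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!demo_pool)
		return -ENOMEM;
	/* Hand a preallocated chunk of memory over to the pool... */
	return gen_pool_add_virt(demo_pool, (unsigned long)vaddr, phys,
				 size, NUMA_NO_NODE);
}

static void *demo_pool_alloc(size_t size)
{
	/* ...so later allocations avoid the page allocator entirely. */
	return (void *)gen_pool_alloc(demo_pool, size);
}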
kernel/dma/direct.c +34 −25
@@ -109,14 +109,15 @@ static inline bool dma_should_free_from_pool(struct device *dev,
 	return false;
 }
 
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
+static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp, unsigned long attrs)
 {
-	size_t alloc_size = PAGE_ALIGN(size);
 	int node = dev_to_node(dev);
 	struct page *page = NULL;
 	u64 phys_limit;
 
+	WARN_ON_ONCE(!PAGE_ALIGNED(size));
+
 	if (attrs & DMA_ATTR_NO_WARN)
 		gfp |= __GFP_NOWARN;
 
@@ -124,14 +125,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	gfp &= ~__GFP_ZERO;
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
-	page = dma_alloc_contiguous(dev, alloc_size, gfp);
+	page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-		dma_free_contiguous(dev, page, alloc_size);
+		dma_free_contiguous(dev, page, size);
 		page = NULL;
 	}
 again:
 	if (!page)
-		page = alloc_pages_node(node, gfp, get_order(alloc_size));
+		page = alloc_pages_node(node, gfp, get_order(size));
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
 		page = NULL;
@@ -157,9 +158,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 {
 	struct page *page;
 	void *ret;
+	int err;
 
+	size = PAGE_ALIGN(size);
+
 	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
-		ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
+		ret = dma_alloc_from_pool(dev, size, &page, gfp);
 		if (!ret)
 			return NULL;
 		goto done;
@@ -183,14 +187,20 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	     dma_alloc_need_uncached(dev, attrs)) ||
 	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
 		/* remove any dirty cache lines on the kernel alias */
-		arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+		arch_dma_prep_coherent(page, size);
 
 		/* create a coherent mapping */
-		ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+		ret = dma_common_contiguous_remap(page, size,
 				dma_pgprot(dev, PAGE_KERNEL, attrs),
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
+		if (force_dma_unencrypted(dev)) {
+			err = set_memory_decrypted((unsigned long)ret,
+						   1 << get_order(size));
+			if (err)
+				goto out_free_pages;
+		}
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -207,8 +217,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev))
-		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
+	if (force_dma_unencrypted(dev)) {
+		err = set_memory_decrypted((unsigned long)ret,
+					   1 << get_order(size));
+		if (err)
+			goto out_free_pages;
+	}
 
 	memset(ret, 0, size);
 
@@ -217,7 +231,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		arch_dma_prep_coherent(page, size);
 		ret = arch_dma_set_uncached(ret, size);
 		if (IS_ERR(ret))
-			goto out_free_pages;
+			goto out_encrypt_pages;
 	}
 done:
 	if (force_dma_unencrypted(dev))
@@ -225,6 +239,15 @@ done:
 	else
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
+
+out_encrypt_pages:
+	if (force_dma_unencrypted(dev)) {
+		err = set_memory_encrypted((unsigned long)page_address(page),
+					   1 << get_order(size));
+		/* If memory cannot be re-encrypted, it must be leaked */
+		if (err)
+			return NULL;
+	}
 out_free_pages:
 	dma_free_contiguous(dev, page, size);
 	return NULL;
@@ -459,7 +482,6 @@ int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
 	return ret;
 }
 
-#ifdef CONFIG_MMU
 bool dma_direct_can_mmap(struct device *dev)
 {
 	return dev_is_dma_coherent(dev) ||
@@ -485,19 +507,6 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
 			user_count << PAGE_SHIFT, vma->vm_page_prot);
 }
-#else /* CONFIG_MMU */
-bool dma_direct_can_mmap(struct device *dev)
-{
-	return false;
-}
-
-int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
-{
-	return -ENXIO;
-}
-#endif /* CONFIG_MMU */
 
 int dma_direct_supported(struct device *dev, u64 mask)
 {
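
The last two hunks drop the !CONFIG_MMU stubs, so dma_direct_can_mmap() and dma_direct_mmap() are now built on nommu kernels as well; combined with the new 'default y if !MMU' for DMA_NONCOHERENT_MMAP, mmap of coherent buffers works again there. A hedged sketch of the kind of caller this re-enables (my_dev, my_buf and my_drv_mmap() are hypothetical; dma_mmap_coherent() is the real API, which on nommu previously failed with -ENXIO):

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

static struct device *my_dev;		/* hypothetical device */
static struct {
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t size;
} my_buf;				/* hypothetical coherent buffer */

static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Maps the coherent buffer into userspace, MMU or not. */
	return dma_mmap_coherent(my_dev, vma, my_buf.cpu_addr,
				 my_buf.dma_handle, my_buf.size);
}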
kernel/dma/remap.c +3 −2
@@ -24,7 +24,8 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 {
 	void *vaddr;
 
-	vaddr = vmap(pages, size >> PAGE_SHIFT, VM_DMA_COHERENT, prot);
+	vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,
+		     VM_DMA_COHERENT, prot);
 	if (vaddr)
 		find_vm_area(vaddr)->pages = pages;
 	return vaddr;
@@ -37,7 +38,7 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 void *dma_common_contiguous_remap(struct page *page, size_t size,
 			pgprot_t prot, const void *caller)
 {
-	int count = size >> PAGE_SHIFT;
+	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct page **pages;
 	void *vaddr;
 	int i;
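
Why the PAGE_ALIGN() additions in the two remap hunks matter: callers can pass a size that is not a whole number of pages, in which case size >> PAGE_SHIFT under-counts the pages to map, and for size < PAGE_SIZE it is zero, so vmap() was asked to map nothing. A small userspace demonstration of the arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long sizes[] = { 0x800, 0x1000, 0x1800 };

	for (int i = 0; i < 3; i++) {
		unsigned long sz = sizes[i];

		/* e.g. 0x800: old count 0 vs new count 1; 0x1800: 1 vs 2 */
		printf("size=%#lx  old count=%lu  new count=%lu\n",
		       sz, sz >> PAGE_SHIFT, PAGE_ALIGN(sz) >> PAGE_SHIFT);
	}
	return 0;
}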