Commit 76a19940 authored by David Rientjes, committed by Christoph Hellwig

dma-direct: atomic allocations must come from atomic coherent pools



When a device requires unencrypted memory and the context does not allow
blocking, memory must be returned from the atomic coherent pools.
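
For illustration only (not part of this commit), the case being handled is a
coherent allocation from a context that cannot sleep, on a device whose DMA
memory must be unencrypted (for example a device in an SEV guest serviced
from interrupt context).  The function name below is hypothetical:

  #include <linux/dma-mapping.h>

  /* Hypothetical driver fragment, for illustration only. */
  static void *alloc_buf_in_atomic_context(struct device *dev, size_t size,
					   dma_addr_t *dma_handle)
  {
	/*
	 * GFP_ATOMIC means gfpflags_allow_blocking() is false, so
	 * set_memory_decrypted() cannot be called on this path; when
	 * force_dma_unencrypted(dev) is true the buffer has to come from
	 * an atomic coherent pool that was decrypted when it was created.
	 */
	return dma_alloc_coherent(dev, size, dma_handle, GFP_ATOMIC);
  }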

This avoids the remap when CONFIG_DMA_DIRECT_REMAP is not enabled and the
config only requires CONFIG_DMA_COHERENT_POOL.  This will be used for
CONFIG_AMD_MEM_ENCRYPT in a subsequent patch.

Keep all memory in these pools unencrypted.  If set_memory_decrypted()
fails, the memory is not added to the pool.  If adding the memory to the
genpool fails and set_memory_encrypted() subsequently fails as well, there
is no alternative other than leaking the memory.
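
As a self-contained sketch of that policy (the helper name and the single
page are hypothetical; the real change is in atomic_pool_expand(), in the
second diff below):

  #include <linux/genalloc.h>
  #include <linux/gfp.h>
  #include <linux/mm.h>
  #include <linux/numa.h>
  #include <linux/set_memory.h>

  static int pool_add_unencrypted_page(struct gen_pool *pool, struct page *page)
  {
	void *addr = page_to_virt(page);
	int ret;

	ret = set_memory_decrypted((unsigned long)addr, 1);
	if (ret)
		goto free_page;		/* nothing was added, safe to free */

	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				PAGE_SIZE, NUMA_NO_NODE);
	if (!ret)
		return 0;

	/* Adding to the genpool failed: restore encryption before freeing. */
	if (WARN_ON_ONCE(set_memory_encrypted((unsigned long)addr, 1)))
		return ret;		/* cannot re-encrypt, so leak the page */

  free_page:
	__free_pages(page, 0);
	return ret;
  }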

Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 54adadf9
kernel/dma/direct.c  +39 −7
@@ -76,6 +76,39 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+/*
+ * Decrypting memory is allowed to block, so if this device requires
+ * unencrypted memory it must come from atomic pools.
+ */
+static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
+					      unsigned long attrs)
+{
+	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+		return false;
+	if (gfpflags_allow_blocking(gfp))
+		return false;
+	if (force_dma_unencrypted(dev))
+		return true;
+	if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+		return false;
+	if (dma_alloc_need_uncached(dev, attrs))
+		return true;
+	return false;
+}
+
+static inline bool dma_should_free_from_pool(struct device *dev,
+					     unsigned long attrs)
+{
+	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+		return true;
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev))
+		return false;
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+		return true;
+	return false;
+}
+
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp, unsigned long attrs)
 {
@@ -125,9 +158,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
-	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_alloc_need_uncached(dev, attrs) &&
-	    !gfpflags_allow_blocking(gfp)) {
+	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
 		ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
 		if (!ret)
 			return NULL;
@@ -204,6 +235,11 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
 	unsigned int page_order = get_order(size);
 
+	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
+	if (dma_should_free_from_pool(dev, attrs) &&
+	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
+		return;
+
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
@@ -211,10 +247,6 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
-		return;
-
 	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

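The early return added at the top of dma_direct_free_pages() works because
dma_free_from_pool() fails gracefully when cpu_addr did not come from an
atomic pool.  A rough sketch of that contract (not the actual pool
implementation; dev_to_pool() is a hypothetical stand-in for the per-device
pool selection):

  bool dma_free_from_pool(struct device *dev, void *start, size_t size)
  {
	struct gen_pool *pool = dev_to_pool(dev);	/* hypothetical lookup */

	if (!pool || !gen_pool_has_addr(pool, (unsigned long)start, size))
		return false;	/* not pool memory: caller frees it normally */

	gen_pool_free(pool, (unsigned long)start, size);
	return true;
  }
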
kernel/dma/pool.c  +24 −3
@@ -8,6 +8,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/init.h>
 #include <linux/genalloc.h>
+#include <linux/set_memory.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 
@@ -53,22 +54,42 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,

 	arch_dma_prep_coherent(page, pool_size);
 
+#ifdef CONFIG_DMA_DIRECT_REMAP
 	addr = dma_common_contiguous_remap(page, pool_size,
 					   pgprot_dmacoherent(PAGE_KERNEL),
 					   __builtin_return_address(0));
 	if (!addr)
 		goto free_page;
-
+#else
+	addr = page_to_virt(page);
+#endif
+	/*
+	 * Memory in the atomic DMA pools must be unencrypted, the pools do not
+	 * shrink so no re-encryption occurs in dma_direct_free_pages().
+	 */
+	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
+				   1 << order);
+	if (ret)
+		goto remove_mapping;
 	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
 				pool_size, NUMA_NO_NODE);
 	if (ret)
-		goto remove_mapping;
+		goto encrypt_mapping;
 
 	return 0;
 
+encrypt_mapping:
+	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
+				   1 << order);
+	if (WARN_ON_ONCE(ret)) {
+		/* Decrypt succeeded but encrypt failed, purposely leak */
+		goto out;
+	}
 remove_mapping:
+#ifdef CONFIG_DMA_DIRECT_REMAP
 	dma_common_free_remap(addr, pool_size);
-free_page:
+#endif
+free_page: __maybe_unused
 	if (!dma_release_from_contiguous(NULL, page, 1 << order))
 		__free_pages(page, order);
 out: