Commit e860c299 authored by David Rientjes, committed by Christoph Hellwig

dma-remap: separate DMA atomic pools from direct remap code



DMA atomic pools will be needed beyond CONFIG_DMA_DIRECT_REMAP alone, so
separate them out into their own file.

This also adds a new Kconfig option, CONFIG_DMA_COHERENT_POOL, that can
subsequently be selected by options such as CONFIG_AMD_MEM_ENCRYPT, which
will use the coherent pools but have no dependency on direct remapping.

This patch by itself introduces no functional change.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Rientjes <rientjes@google.com>
[hch: fixup copyrights and remove unused includes]
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 05f099a7
kernel/dma/Kconfig
+5 −1
@@ -79,10 +79,14 @@ config DMA_REMAP
 	select DMA_NONCOHERENT_MMAP
 	bool
 
-config DMA_DIRECT_REMAP
+config DMA_COHERENT_POOL
 	bool
 	select DMA_REMAP
 
+config DMA_DIRECT_REMAP
+	bool
+	select DMA_COHERENT_POOL
+
 config DMA_CMA
 	bool "DMA Contiguous Memory Allocator"
 	depends on HAVE_DMA_CONTIGUOUS && CMA
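
After this hunk, DMA_DIRECT_REMAP pulls in DMA_REMAP transitively through
DMA_COHERENT_POOL, and an option that needs the atomic pools without direct
remapping can select DMA_COHERENT_POOL alone. A rough sketch of how a later
user such as CONFIG_AMD_MEM_ENCRYPT might wire this up (illustrative only;
the real x86 entry has more dependencies and is not part of this patch):

# Hypothetical follow-up, not in this commit: a user of the coherent
# pools that has no dependency on CONFIG_DMA_DIRECT_REMAP.
config AMD_MEM_ENCRYPT
	bool "AMD Secure Memory Encryption (SME) support"
	depends on X86_64 && CPU_SUP_AMD
	select DMA_COHERENT_POOL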
kernel/dma/Makefile
+1 −0
@@ -6,4 +6,5 @@ obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
 obj-$(CONFIG_DMA_VIRT_OPS)		+= virt.o
 obj-$(CONFIG_DMA_API_DEBUG)		+= debug.o
 obj-$(CONFIG_SWIOTLB)			+= swiotlb.o
+obj-$(CONFIG_DMA_COHERENT_POOL)		+= pool.o
 obj-$(CONFIG_DMA_REMAP)			+= remap.o

kernel/dma/pool.c

0 → 100644
+123 −0
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/slab.h>

static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

static gfp_t dma_atomic_pool_gfp(void)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		return GFP_DMA;
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		return GFP_DMA32;
	return GFP_KERNEL;
}
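/*
 * Editorial note: the pool sits in the most constrained zone available
 * so that devices with narrow DMA masks can still allocate from it;
 * later patches in this series split this into one pool per GFP zone.
 */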

static int __init dma_atomic_pool_init(void)
{
	unsigned int pool_size_order = get_order(atomic_pool_size);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	int ret;

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, atomic_pool_size);

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto free_page;

	addr = dma_common_contiguous_remap(page, atomic_pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto destroy_genpool;

	ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
				page_to_phys(page), atomic_pool_size, -1);
	if (ret)
		goto remove_mapping;
	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);

	pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
		atomic_pool_size / 1024);
	return 0;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
		atomic_pool_size / 1024);
	return -ENOMEM;
}
postcore_initcall(dma_atomic_pool_init);
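/*
 * Editorial note: postcore_initcall() runs well before normal device
 * probing, so the pool already exists by the time drivers can issue
 * atomic coherent allocations.
 */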

bool dma_in_atomic_pool(void *start, size_t size)
{
	if (unlikely(!atomic_pool))
		return false;

	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}

void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = pfn_to_page(__phys_to_pfn(phys));
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

bool dma_free_from_pool(void *start, size_t size)
{
	if (!dma_in_atomic_pool(start, size))
		return false;
	gen_pool_free(atomic_pool, (unsigned long)start, size);
	return true;
}
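
For context, these helpers back the non-blocking branch of the direct-mapping
allocator. The sketch below shows roughly how a caller of this era uses them;
alloc_coherent_sketch()/free_coherent_sketch() are made-up names, and the real
caller lives in kernel/dma/direct.c (dma_direct_alloc_pages() and friends):

/* Illustrative sketch only, not part of this patch. */
#include <linux/dma-direct.h>
#include <linux/gfp.h>

static void *alloc_coherent_sketch(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (!gfpflags_allow_blocking(gfp)) {
		/* Atomic context: carve a zeroed chunk out of the pool. */
		ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
		if (!ret)
			return NULL;
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return ret;
	}

	/* Blocking path (elided): alloc_pages() plus optional remapping. */
	return NULL;
}

static void free_coherent_sketch(void *vaddr, size_t size)
{
	/* Pool memory goes back to the gen_pool, not the page allocator. */
	if (dma_free_from_pool(vaddr, PAGE_ALIGN(size)))
		return;
	/* Otherwise unmap and free exactly as allocated (elided). */
}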
kernel/dma/remap.c
+1 −120
@@ -1,13 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2012 ARM Ltd.
  * Copyright (c) 2014 The Linux Foundation
  */
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
-#include <linux/init.h>
-#include <linux/genalloc.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -97,117 +92,3 @@ void dma_common_free_remap(void *cpu_addr, size_t size)
 	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
 	vunmap(cpu_addr);
 }
-
-#ifdef CONFIG_DMA_DIRECT_REMAP
-static struct gen_pool *atomic_pool __ro_after_init;
-
-#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
-static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
-
-static int __init early_coherent_pool(char *p)
-{
-	atomic_pool_size = memparse(p, &p);
-	return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
-static gfp_t dma_atomic_pool_gfp(void)
-{
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		return GFP_DMA;
-	if (IS_ENABLED(CONFIG_ZONE_DMA32))
-		return GFP_DMA32;
-	return GFP_KERNEL;
-}
-
-static int __init dma_atomic_pool_init(void)
-{
-	unsigned int pool_size_order = get_order(atomic_pool_size);
-	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
-	struct page *page;
-	void *addr;
-	int ret;
-
-	if (dev_get_cma_area(NULL))
-		page = dma_alloc_from_contiguous(NULL, nr_pages,
-						 pool_size_order, false);
-	else
-		page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);
-	if (!page)
-		goto out;
-
-	arch_dma_prep_coherent(page, atomic_pool_size);
-
-	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
-	if (!atomic_pool)
-		goto free_page;
-
-	addr = dma_common_contiguous_remap(page, atomic_pool_size,
-					   pgprot_dmacoherent(PAGE_KERNEL),
-					   __builtin_return_address(0));
-	if (!addr)
-		goto destroy_genpool;
-
-	ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
-				page_to_phys(page), atomic_pool_size, -1);
-	if (ret)
-		goto remove_mapping;
-	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
-
-	pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
-		atomic_pool_size / 1024);
-	return 0;
-
-remove_mapping:
-	dma_common_free_remap(addr, atomic_pool_size);
-destroy_genpool:
-	gen_pool_destroy(atomic_pool);
-	atomic_pool = NULL;
-free_page:
-	if (!dma_release_from_contiguous(NULL, page, nr_pages))
-		__free_pages(page, pool_size_order);
-out:
-	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
-		atomic_pool_size / 1024);
-	return -ENOMEM;
-}
-postcore_initcall(dma_atomic_pool_init);
-
-bool dma_in_atomic_pool(void *start, size_t size)
-{
-	if (unlikely(!atomic_pool))
-		return false;
-
-	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
-}
-
-void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
-{
-	unsigned long val;
-	void *ptr = NULL;
-
-	if (!atomic_pool) {
-		WARN(1, "coherent pool not initialised!\n");
-		return NULL;
-	}
-
-	val = gen_pool_alloc(atomic_pool, size);
-	if (val) {
-		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
-
-		*ret_page = pfn_to_page(__phys_to_pfn(phys));
-		ptr = (void *)val;
-		memset(ptr, 0, size);
-	}
-
-	return ptr;
-}
-
-bool dma_free_from_pool(void *start, size_t size)
-{
-	if (!dma_in_atomic_pool(start, size))
-		return false;
-	gen_pool_free(atomic_pool, (unsigned long)start, size);
-	return true;
-}
-#endif /* CONFIG_DMA_DIRECT_REMAP */