Commit 1ee18de9 authored by Linus Torvalds
Browse files

Merge tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - enhance the dma pool to allow atomic allocation on x86 with AMD SEV
   (David Rientjes)

 - two small cleanups (Jason Yan and Peter Collingbourne)

* tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping:
  dma-contiguous: fix comment for dma_release_from_contiguous
  dma-pool: scale the default DMA coherent pool size with memory capacity
  x86/mm: unencrypted non-blocking DMA allocations use coherent pools
  dma-pool: add pool sizes to debugfs
  dma-direct: atomic allocations must come from atomic coherent pools
  dma-pool: dynamically expanding atomic pools
  dma-pool: add additional coherent pools to map to gfp mask
  dma-remap: separate DMA atomic pools from direct remap code
  dma-debug: make __dma_entry_alloc_check_leak() static
parents e542e0dc 298f3db6
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -1524,6 +1524,7 @@ config X86_CPA_STATISTICS
config AMD_MEM_ENCRYPT
	bool "AMD Secure Memory Encryption (SME) support"
	depends on X86_64 && CPU_SUP_AMD
	select DMA_COHERENT_POOL
	select DYNAMIC_PHYSICAL_MASK
	select ARCH_USE_MEMREMAP_PROT
	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
+3 −2
Original line number Diff line number Diff line
@@ -952,7 +952,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, alloc_size))
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
@@ -1035,7 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
		cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
					       gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
+2 −0
Original line number Diff line number Diff line
@@ -67,6 +67,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
}

u64 dma_direct_get_required_mask(struct device *dev);
gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
				  u64 *phys_mask);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+3 −3
Original line number Diff line number Diff line
@@ -630,9 +630,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
			pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);
void *dma_alloc_from_pool(struct device *dev, size_t size,
			  struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+5 −1
Original line number Diff line number Diff line
@@ -79,10 +79,14 @@ config DMA_REMAP
	select DMA_NONCOHERENT_MMAP
	bool

config DMA_DIRECT_REMAP
config DMA_COHERENT_POOL
	bool
	select DMA_REMAP

config DMA_DIRECT_REMAP
	bool
	select DMA_COHERENT_POOL

config DMA_CMA
	bool "DMA Contiguous Memory Allocator"
	depends on HAVE_DMA_CONTIGUOUS && CMA
Loading