Commit d7293f79 authored by Christoph Hellwig's avatar Christoph Hellwig
Browse files

Merge branch 'for-next/zone-dma' of...

Merge branch 'for-next/zone-dma' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux into dma-mapping-for-next

Pull in a stable branch from the arm64 tree that adds the zone_dma_bits
variable, to avoid creating hard-to-resolve conflicts with that addition.
parents 68a33b17 bff3b044
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -265,6 +265,10 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
	def_bool y

config ZONE_DMA
	bool "Support DMA zone" if EXPERT
	default y

config ZONE_DMA32
	bool "Support DMA32 zone" if EXPERT
	default y
+53 −24
Original line number Diff line number Diff line
@@ -20,6 +20,7 @@
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
@@ -41,6 +42,8 @@
#include <asm/tlb.h>
#include <asm/alternative.h>

#define ARM64_ZONE_DMA_BITS	30

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
@@ -56,7 +59,14 @@ EXPORT_SYMBOL(physvirt_offset);
struct page *vmemmap __ro_after_init;
EXPORT_SYMBOL(vmemmap);

/*
 * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
 * memory as some devices, namely the Raspberry Pi 4, have peripherals with
 * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
 * bit addressable memory area.
 */
phys_addr_t arm64_dma_phys_limit __ro_after_init;
static phys_addr_t arm64_dma32_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*
@@ -81,7 +91,7 @@ static void __init reserve_crashkernel(void)

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
		crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -169,15 +179,16 @@ static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 * Return the maximum physical address for a zone with a given address size
 * limit. It currently assumes that for memory starting above 4G, 32-bit
 * devices will use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, zone_bits);
	return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM());
}

#ifdef CONFIG_NUMA
@@ -186,8 +197,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max;

@@ -200,16 +214,21 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;
	unsigned long max_dma32 = min;
	unsigned long __maybe_unused max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA32
#ifdef CONFIG_ZONE_DMA
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA32] = max_dma - min;
	zone_size[ZONE_DMA] = max_dma - min;
	max_dma32 = max_dma;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
	zone_size[ZONE_DMA32] = max_dma32 - max_dma;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;
	zone_size[ZONE_NORMAL] = max - max_dma32;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

@@ -219,16 +238,22 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA32
#ifdef CONFIG_ZONE_DMA
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA32] -= dma_end - start;
			unsigned long dma_end = min_not_zero(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
#ifdef CONFIG_ZONE_DMA32
		if (start < max_dma32) {
			unsigned long dma32_end = min(end, max_dma32);
			unsigned long dma32_start = max(start, max_dma);
			zhole_size[ZONE_DMA32] -= dma32_end - dma32_start;
		}
#endif
		if (end > max_dma32) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			unsigned long normal_start = max(start, max_dma32);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}
@@ -418,11 +443,15 @@ void __init arm64_memblock_init(void)

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		zone_dma_bits = ARM64_ZONE_DMA_BITS;
		arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma_phys_limit = max_zone_dma_phys();
		arm64_dma32_phys_limit = max_zone_phys(32);
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;
		arm64_dma32_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

@@ -430,7 +459,7 @@ void __init arm64_memblock_init(void)

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma_phys_limit);
	dma_contiguous_reserve(arm64_dma32_phys_limit);
}

void __init bootmem_init(void)
@@ -534,7 +563,7 @@ static void __init free_unused_memmap(void)
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;
+0 −9
Original line number Diff line number Diff line
@@ -329,13 +329,4 @@ struct vm_area_struct;
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>

/*
 * Allow 30-bit DMA for very limited Broadcom wifi chips on many powerbooks.
 */
#ifdef CONFIG_PPC32
#define ARCH_ZONE_DMA_BITS 30
#else
#define ARCH_ZONE_DMA_BITS 31
#endif

#endif /* _ASM_POWERPC_PAGE_H */
+15 −5
Original line number Diff line number Diff line
@@ -31,6 +31,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>
#include <linux/dma-direct.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
@@ -201,10 +202,10 @@ static int __init mark_nonram_nosave(void)
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
 * inform the generic DMA mapping code.  32-bit only devices (if not handled
 * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
 * otherwise served by ZONE_DMA.
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code.  32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

@@ -237,9 +238,18 @@ void __init paging_init(void)
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
				      1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
+0 −2
Original line number Diff line number Diff line
@@ -177,8 +177,6 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define ARCH_ZONE_DMA_BITS	31

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

Loading