Commit 0034d395 authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Michael Ellerman
Browse files

powerpc/mm/hash64: Map all the kernel regions in the same 0xc range



This patch maps the vmalloc, IO and vmemmap regions in the 0xc address range
instead of the current 0xd and 0xf ranges. This brings the mapping closer
to the radix translation mode.

With the hash 64K page size, each of these regions is 512TB, whereas with the
4K config we are limited by the max page table range of 64TB and hence these
regions are 16TB in size.

The kernel mapping is now:

 On 4K hash

     kernel_region_map_size = 16TB
     kernel vmalloc start   = 0xc000100000000000
     kernel IO start        = 0xc000200000000000
     kernel vmemmap start   = 0xc000300000000000

64K hash, 64K radix and 4k radix:

     kernel_region_map_size = 512TB
     kernel vmalloc start   = 0xc008000000000000
     kernel IO start        = 0xc00a000000000000
     kernel vmemmap start   = 0xc00c000000000000

Signed-off-by: default avatarAneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
parent a35a3c6f
Loading
Loading
Loading
Loading
+13 −0
Original line number Diff line number Diff line
@@ -13,6 +13,19 @@
 */
#define MAX_EA_BITS_PER_CONTEXT		46

/*
 * Our page table limits us to 64TB. Hence for the kernel mapping,
 * each MAP area is limited to 16TB (64TB split four ways).
 * The four map areas are: linear mapping, vmap, IO and vmemmap.
 */
#define H_KERN_MAP_SIZE		(ASM_CONST(1) << (MAX_EA_BITS_PER_CONTEXT - 2))

/*
 * Start of the kernel non-linear virtual area (vmalloc); the IO and
 * vmemmap areas follow it, each H_KERN_MAP_SIZE (16TB on 4K pages).
 */
#define H_KERN_VIRT_START	ASM_CONST(0xc000100000000000)

#ifndef __ASSEMBLY__
#define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
#define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << H_PMD_INDEX_SIZE)
+11 −0
Original line number Diff line number Diff line
@@ -14,6 +14,17 @@
 */
#define MAX_EA_BITS_PER_CONTEXT		49

/*
 * We use one MMU context for each MAP area, so each area covers a full
 * context worth of address space (512TB with 49 EA bits).
 */
#define H_KERN_MAP_SIZE		(1UL << MAX_EA_BITS_PER_CONTEXT)

/*
 * Start of the kernel non-linear virtual area (vmalloc); the IO and
 * vmemmap areas follow it, each H_KERN_MAP_SIZE (512TB on 64K pages).
 */
#define H_KERN_VIRT_START	ASM_CONST(0xc008000000000000)

/*
 * 64K-aligned addresses free up a few of the lower bits of the RPN for us.
 * We steal those here. For more details look at pte_pfn/pfn_pte()
+61 −34
Original line number Diff line number Diff line
@@ -29,6 +29,10 @@
/* Effective-address bits covered by the kernel page tables. */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
/*
 * Top 2 bits of the effective address are ignored in the page table
 * walk, so mask them (the 0xc region nibble) off before range checks.
 */
#define EA_MASK			(~(0xcUL << 60))

/*
 * We store the slot details in the second half of page table.
@@ -42,53 +46,56 @@
#endif

/*
 * Kernel virtual address layout — everything lives in the 0xc region:
 *
 * +------------------------------+
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel virtual map end (0xc00e000000000000)
 * |                              |
 * |                              |
 * |      512TB/16TB of vmemmap   |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel vmemmap  start
 * |                              |
 * |      512TB/16TB of IO map    |
 * |                              |
 * +------------------------------+  Kernel IO map start
 * |                              |
 * |      512TB/16TB of vmap      |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

/* vmalloc occupies the first H_KERN_MAP_SIZE chunk of the non-linear area. */
#define H_VMALLOC_START		H_KERN_VIRT_START
#define H_VMALLOC_SIZE		H_KERN_MAP_SIZE
#define H_VMALLOC_END		(H_VMALLOC_START + H_VMALLOC_SIZE)

/* The IO map follows vmalloc. */
#define H_KERN_IO_START		H_VMALLOC_END
#define H_KERN_IO_SIZE		H_KERN_MAP_SIZE
#define H_KERN_IO_END		(H_KERN_IO_START + H_KERN_IO_SIZE)

/* vmemmap follows the IO map. */
#define H_VMEMMAP_START		H_KERN_IO_END
#define H_VMEMMAP_SIZE		H_KERN_MAP_SIZE
#define H_VMEMMAP_END		(H_VMEMMAP_START + H_VMEMMAP_SIZE)

/*
 * Region IDs, as returned by get_region_id().
 */
#define USER_REGION_ID		1
#define KERNEL_REGION_ID	2
#define VMALLOC_REGION_ID	3
#define IO_REGION_ID		4
#define VMEMMAP_REGION_ID	5

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
@@ -104,6 +111,26 @@
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
/*
 * Translate an effective address into one of the *_REGION_ID values.
 * Only user addresses (top nibble 0) and kernel 0xc addresses within
 * the mapped range are valid here; anything else trips a VM_BUG_ON.
 */
static inline int get_region_id(unsigned long ea)
{
	int top_nibble = (ea >> 60UL);

	if (top_nibble == 0)
		return USER_REGION_ID;

	VM_BUG_ON(top_nibble != 0xc);
	VM_BUG_ON(ea >= H_VMEMMAP_END);

	/* Walk the 0xc sub-regions from lowest address upward. */
	if (ea < H_VMALLOC_START)
		return KERNEL_REGION_ID;
	if (ea < H_KERN_IO_START)
		return VMALLOC_REGION_ID;
	if (ea < H_VMEMMAP_START)
		return IO_REGION_ID;

	return VMEMMAP_REGION_ID;
}

#define	hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
#define	hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
+15 −16
Original line number Diff line number Diff line
@@ -588,7 +588,8 @@ extern void slb_set_size(u16 size);
#endif

/* One dedicated kernel context per non-linear map area. */
#define MAX_VMALLOC_CTX_CNT	1
#define MAX_IO_CTX_CNT		1
#define MAX_VMEMMAP_CTX_CNT	1

/*
 * 256MB segment
@@ -601,13 +602,10 @@ extern void slb_set_size(u16 size);
 * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in vsid scramble.
 *
 * We add one extra context to MIN_USER_CONTEXT so that we can map kernel
 * context easily. The +1 is to map the unused 0xe region mapping.
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)
#define MIN_USER_CONTEXT	(MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
				 MAX_MEMMAP_CTX_CNT + 2)

				 MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT)
/*
 * For platforms that support on 65bit VA we limit the context bits
 */
@@ -776,7 +774,7 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
	/*
	 * Bad address. We return VSID 0 for that. Mask off the region
	 * nibble (top 2 bits) before comparing against the page table
	 * range.
	 */
	if ((ea & EA_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
@@ -803,28 +801,29 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 * 0x00002 -  [ 0xc002000000000000 - 0xc003ffffffffffff]
 * 0x00003 -  [ 0xc004000000000000 - 0xc005ffffffffffff]
 * 0x00004 -  [ 0xc006000000000000 - 0xc007ffffffffffff]

 *
 * vmap, IO, vmemmap
 *
 * 0x00005 -  [ 0xc008000000000000 - 0xc009ffffffffffff]
 * 0x00006 -  [ 0xc00a000000000000 - 0xc00bffffffffffff]
 * 0x00007 -  [ 0xc00c000000000000 - 0xc00dffffffffffff]
 *
 */
/*
 * Return the MMU context id used to hash a kernel effective address.
 * The linear map (KERNEL_REGION_ID) uses one context per
 * MAX_EA_BITS_PER_CONTEXT-sized chunk; the vmap/IO/vmemmap regions
 * each get one dedicated context after those.
 */
static inline unsigned long get_kernel_context(unsigned long ea)
{
	unsigned long region_id = get_region_id(ea);
	unsigned long ctx;
	/*
	 * Depending on Kernel config, kernel region can have one context
	 * or more.
	 */
	if (region_id == KERNEL_REGION_ID) {
		/*
		 * We already verified ea to be not beyond the addr limit.
		 */
		ctx = 1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
	} else
		ctx = region_id + MAX_KERNEL_CTX_CNT - 2;
	return ctx;
}

+0 −1
Original line number Diff line number Diff line
@@ -279,7 +279,6 @@ extern unsigned long __kernel_virt_size;
extern unsigned long __kernel_io_start;
extern unsigned long __kernel_io_end;
#define KERN_VIRT_START __kernel_virt_start
#define KERN_VIRT_SIZE  __kernel_virt_size
#define KERN_IO_START  __kernel_io_start
#define KERN_IO_END __kernel_io_end

Loading