Commit 697ece78 authored by Christophe Leroy, committed by Michael Ellerman

powerpc/32s: reorder Linux PTE bits to better match Hash PTE bits.



Reorder Linux PTE bits to (almost) match Hash PTE bits.

RW Kernel : PP = 00
RO Kernel : PP = 00
RW User   : PP = 01
RO User   : PP = 11

So naturally, we should have
_PAGE_USER = 0x001
_PAGE_RW   = 0x002

Today 0x001 and 0x002 are _PAGE_PRESENT and _PAGE_HASHPTE, which
are both software-only bits.

Switch _PAGE_USER and _PAGE_PRESENT
Switch _PAGE_RW and _PAGE_HASHPTE

This allows a few insns to be removed.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/c4d6c18a7f8d9d3b899bc492f55fbc40ef38896a.1583861325.git.christophe.leroy@c-s.fr
parent af92bad6
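As a reference for the hunks below, a minimal sketch in plain C of the bit swap described in the message above. The OLD_/NEW_ prefixes are hypothetical names used only for illustration (they are not kernel identifiers); the values are taken from the hash.h hunk that follows.

/* Illustrative only: Linux PTE low-word bits before and after this commit. */
#define OLD_PAGE_PRESENT 0x001	/* software bit */
#define OLD_PAGE_HASHPTE 0x002	/* software bit */
#define OLD_PAGE_USER    0x004
#define OLD_PAGE_RW      0x400	/* software bit */

#define NEW_PAGE_USER    0x001	/* now sits at the Hash PTE PP lsb position */
#define NEW_PAGE_RW      0x002	/* now sits at the Hash PTE PP msb position */
#define NEW_PAGE_PRESENT 0x004	/* software-only, free to move */
#define NEW_PAGE_HASHPTE 0x400	/* software-only, free to move */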
arch/powerpc/include/asm/book3s/32/hash.h: +4 −4
@@ -17,9 +17,9 @@
 * updating the accessed and modified bits in the page table tree.
 */

-#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
-#define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
-#define _PAGE_USER	0x004	/* usermode access allowed */
+#define _PAGE_USER	0x001	/* usermode access allowed */
+#define _PAGE_RW	0x002	/* software: user write access allowed */
+#define _PAGE_PRESENT	0x004	/* software: pte contains a translation */
#define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
@@ -27,7 +27,7 @@
#define _PAGE_DIRTY	0x080	/* C: page changed */
#define _PAGE_ACCESSED	0x100	/* R: page referenced */
#define _PAGE_EXEC	0x200	/* software: exec allowed */
-#define _PAGE_RW	0x400	/* software: user write access allowed */
+#define _PAGE_HASHPTE	0x400	/* hash_page has made an HPTE for this pte */
#define _PAGE_SPECIAL	0x800	/* software: Special page */

#ifdef CONFIG_PTE_64BIT
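The payoff of the new values is that the Hash PTE low word keeps its PP protection field in the two least-significant bits, exactly where _PAGE_USER and _PAGE_RW now live, so the Linux-to-hash PTE conversions in the hunks below need fewer rotate/insert instructions. A rough C sketch of the PP encoding those paths produce, following the table in the commit message and the "PP = user? rw? 1: 3: 0" comments (pp_from_pte is a hypothetical name, illustration only):

/* Sketch, not kernel code: PP derived from the new PTE low bits. */
static unsigned int pp_from_pte(unsigned int pte)
{
	unsigned int user = pte & 0x001;	/* new _PAGE_USER */
	unsigned int rw   = pte & 0x002;	/* new _PAGE_RW   */

	if (!user)
		return 0;		/* kernel mapping: PP = 00 */
	return rw ? 1 : 3;		/* user RW: PP = 01, user RO: PP = 11 */
}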
arch/powerpc/kernel/head_32.S: +3 −6
@@ -348,7 +348,7 @@ BEGIN_MMU_FTR_SECTION
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
-	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
+	rlwinm	r3, r5, 32 - 24, 30, 30		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	b	handle_page_fault_tramp_1
FTR_SECTION_ELSE
@@ -497,7 +497,6 @@ InstructionTLBMiss:
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	/* Convert linux-style PTE to low word of PPC-style PTE */
-	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1, r1, 0xe06		/* clear out reserved bits */
	andc	r1, r0, r1		/* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
@@ -565,9 +564,8 @@ DataLoadTLBMiss:
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
-	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
-	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
-	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
+	rlwinm	r1,r0,0,30,30		/* _PAGE_RW -> PP msb */
+	rlwimi	r0,r0,1,30,30		/* _PAGE_USER -> PP msb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
@@ -645,7 +643,6 @@ DataStoreTLBMiss:
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
-	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	li	r1,0xe06		/* clear out reserved bits & PP msb */
	andc	r1,r0,r1		/* PP = user? 1: 0 */
BEGIN_FTR_SECTION
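The new rotate amounts above fall directly out of the new bit positions. The DSISR "store" bit (0x02000000, as implied by the old rotate amount) is what lands on _PAGE_RW: a right rotate by 15 reached the old 0x400, a right rotate by 24 reaches the new 0x002, hence rlwinm with 32 - 24 and mask bit 30. Likewise, _PAGE_USER already sits at the PP lsb, which is why the "_PAGE_USER -> PP lsb" rlwimi disappears from InstructionTLBMiss and DataStoreTLBMiss. A tiny C self-check of the shift arithmetic (illustrative only):

#include <assert.h>

int main(void)
{
	unsigned int dsisr_store = 0x02000000u;	/* DSISR "store" bit */

	/* old: rotate right by 15 lands on 0x400 (old _PAGE_RW) */
	assert(((dsisr_store >> 15) & 0x400) == 0x400);
	/* new: rotate right by 24 lands on 0x002 (new _PAGE_RW) */
	assert(((dsisr_store >> 24) & 0x002) == 0x002);
	return 0;
}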
arch/powerpc/mm/book3s32/hash_low.S: +6 −8
@@ -35,7 +35,7 @@ mmu_hash_lock:
/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
- * _PAGE_RW (0x400) if a write.
+ * _PAGE_RW (0x002) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 *
@@ -69,7 +69,7 @@ _GLOBAL(hash_page)
	blt+	112f			/* assume user more likely */
	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
-	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
+	rlwimi	r3,r9,32-14,31,31	/* MSR_PR -> _PAGE_USER */
112:
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
@@ -94,7 +94,7 @@ _GLOBAL(hash_page)
#else
	rlwimi	r8,r4,23,20,28		/* compute pte address */
#endif
-	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
+	rlwinm	r0,r3,6,24,24		/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
@@ -310,11 +310,9 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)

_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
-	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
-	and	r8,r8,r0		/* writable if _RW & _DIRTY */
-	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
-	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
+	and	r8,r5,r0		/* writable if _RW & _DIRTY */
+	rlwimi	r5,r5,1,30,30		/* _PAGE_USER -> PP msb */
	ori	r8,r8,0xe04		/* clear out reserved bits */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
BEGIN_FTR_SECTION
@@ -566,7 +564,7 @@ _GLOBAL(flush_hash_pages)
33:	lwarx	r8,0,r5			/* fetch the pte flags word */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
-	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
+	rlwinm	r8,r8,0,~_PAGE_HASHPTE	/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b
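In create_hpte the same alignment saves two instructions: _PAGE_RW no longer has to be rotated into the PP msb (the old rlwinm is dropped and the and takes r5 directly, since the bit already sits at position 30), and one of the two _PAGE_USER inserts goes away because the bit already sits at the PP lsb. The flush_hash_pages hunk follows from the same move: the hand-coded 31,29 mask cleared the old 0x002 bit, and rather than recomputing mask bounds for 0x400 the new code writes it symbolically as ~_PAGE_HASHPTE. A C sketch of the protection logic create_hpte ends up with, per its "PP = user? (rw&dirty? 1: 3): 0" comment (hpte_pp is a hypothetical name, illustration only):

/* Sketch, not kernel code: PP as create_hpte computes it after this change. */
static unsigned int hpte_pp(unsigned int pte)
{
	unsigned int user  = pte & 0x001;	/* new _PAGE_USER */
	unsigned int rw    = pte & 0x002;	/* new _PAGE_RW   */
	unsigned int dirty = pte & 0x080;	/* _PAGE_DIRTY    */

	if (!user)
		return 0;			/* kernel mapping: PP = 00 */
	return (rw && dirty) ? 1 : 3;		/* write access only once dirty */
}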